Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 84210aeb authored by Linus Torvalds
Browse files
* 'drm-radeon-kms' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6: (35 commits)
  drm/radeon: set fb aperture sizes for framebuffer handoff.
  drm/ttm: fix highuser vs dma32 confusion.
  drm/radeon: Fix size used for benchmarking BO copies.
  drm/radeon: Add radeon.test parameter for running BO GPU copy tests.
  drm/radeon/kms: allow interruptible waits for objects.
  drm/ttm: powerpc: Fix Highmem cache flushing.
  x86: Export kmap_atomic_prot() needed for TTM.
  drm/ttm: Fix ttm in-kernel copying of pages with non-standard caching attributes.
  drm/ttm: Fix an oops and sync object leak.
  drm/radeon/kms: vram sizing on certain r100 chips needs workaround.
  drm/radeon: Pay more attention to object placement requested by userspace.
  drm/radeon: Fall back to evicting BOs with memcpy if necessary.
  drm/radeon: Don't unreserve twice on failure to validate.
  drm/radeon/kms: fix bandwidth computation on avivo hardware
  drm/radeon/kms: add initial colortiling support.
  drm/radeon/kms: fix hotspot handling on pre-avivo chips
  drm/radeon/kms: enable frac fb divs on rs600/rs690/rs740
  drm/radeon/kms: add PLL flag to prefer frequencies <= the target freq
  drm/radeon/kms: block RN50 from using 3D engine.
  drm/radeon/kms: fix VRAM sizing like DDX does it.
  ...
parents 7d4dd028 ed8f0d9e
Loading
Loading
Loading
Loading
+1 −0
Original line number Original line Diff line number Diff line
@@ -103,6 +103,7 @@ EXPORT_SYMBOL(kmap);
EXPORT_SYMBOL(kunmap);
EXPORT_SYMBOL(kunmap);
EXPORT_SYMBOL(kmap_atomic);
EXPORT_SYMBOL(kmap_atomic);
EXPORT_SYMBOL(kunmap_atomic);
EXPORT_SYMBOL(kunmap_atomic);
EXPORT_SYMBOL(kmap_atomic_prot);


void __init set_highmem_pages_init(void)
void __init set_highmem_pages_init(void)
{
{
+2 −1
Original line number Original line Diff line number Diff line
@@ -13,7 +13,8 @@ radeon-$(CONFIG_DRM_RADEON_KMS) += radeon_device.o radeon_kms.o \
	radeon_encoders.o radeon_display.o radeon_cursor.o radeon_i2c.o \
	radeon_encoders.o radeon_display.o radeon_cursor.o radeon_i2c.o \
	radeon_clocks.o radeon_fb.o radeon_gem.o radeon_ring.o radeon_irq_kms.o \
	radeon_clocks.o radeon_fb.o radeon_gem.o radeon_ring.o radeon_irq_kms.o \
	radeon_cs.o radeon_bios.o radeon_benchmark.o r100.o r300.o r420.o \
	radeon_cs.o radeon_bios.o radeon_benchmark.o r100.o r300.o r420.o \
	rs400.o rs600.o rs690.o rv515.o r520.o r600.o rs780.o rv770.o
	rs400.o rs600.o rs690.o rv515.o r520.o r600.o rs780.o rv770.o \
	radeon_test.o


radeon-$(CONFIG_COMPAT) += radeon_ioc32.o
radeon-$(CONFIG_COMPAT) += radeon_ioc32.o


+146 −147
Original line number Original line Diff line number Diff line
@@ -31,6 +31,132 @@
#include "atom.h"
#include "atom.h"
#include "atom-bits.h"
#include "atom-bits.h"


static void atombios_overscan_setup(struct drm_crtc *crtc,
				    struct drm_display_mode *mode,
				    struct drm_display_mode *adjusted_mode)
{
	struct drm_device *dev = crtc->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
	SET_CRTC_OVERSCAN_PS_ALLOCATION args;
	int index = GetIndexIntoMasterTable(COMMAND, SetCRTC_OverScan);
	int a1, a2;

	memset(&args, 0, sizeof(args));

	args.usOverscanRight = 0;
	args.usOverscanLeft = 0;
	args.usOverscanBottom = 0;
	args.usOverscanTop = 0;
	args.ucCRTC = radeon_crtc->crtc_id;

	switch (radeon_crtc->rmx_type) {
	case RMX_CENTER:
		args.usOverscanTop = (adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2;
		args.usOverscanBottom = (adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2;
		args.usOverscanLeft = (adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2;
		args.usOverscanRight = (adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2;
		atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
		break;
	case RMX_ASPECT:
		a1 = mode->crtc_vdisplay * adjusted_mode->crtc_hdisplay;
		a2 = adjusted_mode->crtc_vdisplay * mode->crtc_hdisplay;

		if (a1 > a2) {
			args.usOverscanLeft = (adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2;
			args.usOverscanRight = (adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2;
		} else if (a2 > a1) {
			args.usOverscanLeft = (adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2;
			args.usOverscanRight = (adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2;
		}
		atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
		break;
	case RMX_FULL:
	default:
		args.usOverscanRight = 0;
		args.usOverscanLeft = 0;
		args.usOverscanBottom = 0;
		args.usOverscanTop = 0;
		atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
		break;
	}
}

static void atombios_scaler_setup(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
	ENABLE_SCALER_PS_ALLOCATION args;
	int index = GetIndexIntoMasterTable(COMMAND, EnableScaler);
	/* fixme - fill in enc_priv for atom dac */
	enum radeon_tv_std tv_std = TV_STD_NTSC;

	if (!ASIC_IS_AVIVO(rdev) && radeon_crtc->crtc_id)
		return;

	memset(&args, 0, sizeof(args));

	args.ucScaler = radeon_crtc->crtc_id;

	if (radeon_crtc->devices & (ATOM_DEVICE_TV_SUPPORT)) {
		switch (tv_std) {
		case TV_STD_NTSC:
		default:
			args.ucTVStandard = ATOM_TV_NTSC;
			break;
		case TV_STD_PAL:
			args.ucTVStandard = ATOM_TV_PAL;
			break;
		case TV_STD_PAL_M:
			args.ucTVStandard = ATOM_TV_PALM;
			break;
		case TV_STD_PAL_60:
			args.ucTVStandard = ATOM_TV_PAL60;
			break;
		case TV_STD_NTSC_J:
			args.ucTVStandard = ATOM_TV_NTSCJ;
			break;
		case TV_STD_SCART_PAL:
			args.ucTVStandard = ATOM_TV_PAL; /* ??? */
			break;
		case TV_STD_SECAM:
			args.ucTVStandard = ATOM_TV_SECAM;
			break;
		case TV_STD_PAL_CN:
			args.ucTVStandard = ATOM_TV_PALCN;
			break;
		}
		args.ucEnable = SCALER_ENABLE_MULTITAP_MODE;
	} else if (radeon_crtc->devices & (ATOM_DEVICE_CV_SUPPORT)) {
		args.ucTVStandard = ATOM_TV_CV;
		args.ucEnable = SCALER_ENABLE_MULTITAP_MODE;
	} else {
		switch (radeon_crtc->rmx_type) {
		case RMX_FULL:
			args.ucEnable = ATOM_SCALER_EXPANSION;
			break;
		case RMX_CENTER:
			args.ucEnable = ATOM_SCALER_CENTER;
			break;
		case RMX_ASPECT:
			args.ucEnable = ATOM_SCALER_EXPANSION;
			break;
		default:
			if (ASIC_IS_AVIVO(rdev))
				args.ucEnable = ATOM_SCALER_DISABLE;
			else
				args.ucEnable = ATOM_SCALER_CENTER;
			break;
		}
	}
	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
	if (radeon_crtc->devices & (ATOM_DEVICE_CV_SUPPORT | ATOM_DEVICE_TV_SUPPORT)
	    && rdev->family >= CHIP_RV515 && rdev->family <= CHIP_RV570) {
		atom_rv515_force_tv_scaler(rdev);
	}
}

static void atombios_lock_crtc(struct drm_crtc *crtc, int lock)
static void atombios_lock_crtc(struct drm_crtc *crtc, int lock)
{
{
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
@@ -203,6 +329,12 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
	if (ASIC_IS_AVIVO(rdev)) {
	if (ASIC_IS_AVIVO(rdev)) {
		uint32_t ss_cntl;
		uint32_t ss_cntl;


		if ((rdev->family == CHIP_RS600) ||
		    (rdev->family == CHIP_RS690) ||
		    (rdev->family == CHIP_RS740))
			pll_flags |= (RADEON_PLL_USE_FRAC_FB_DIV |
				      RADEON_PLL_PREFER_CLOSEST_LOWER);

		if (ASIC_IS_DCE32(rdev) && mode->clock > 200000)	/* range limits??? */
		if (ASIC_IS_DCE32(rdev) && mode->clock > 200000)	/* range limits??? */
			pll_flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
			pll_flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
		else
		else
@@ -321,7 +453,7 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
	struct drm_gem_object *obj;
	struct drm_gem_object *obj;
	struct drm_radeon_gem_object *obj_priv;
	struct drm_radeon_gem_object *obj_priv;
	uint64_t fb_location;
	uint64_t fb_location;
	uint32_t fb_format, fb_pitch_pixels;
	uint32_t fb_format, fb_pitch_pixels, tiling_flags;


	if (!crtc->fb)
	if (!crtc->fb)
		return -EINVAL;
		return -EINVAL;
@@ -358,7 +490,14 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
		return -EINVAL;
		return -EINVAL;
	}
	}


	/* TODO tiling */
	radeon_object_get_tiling_flags(obj->driver_private,
				       &tiling_flags, NULL);
	if (tiling_flags & RADEON_TILING_MACRO)
		fb_format |= AVIVO_D1GRPH_MACRO_ADDRESS_MODE;

	if (tiling_flags & RADEON_TILING_MICRO)
		fb_format |= AVIVO_D1GRPH_TILED;

	if (radeon_crtc->crtc_id == 0)
	if (radeon_crtc->crtc_id == 0)
		WREG32(AVIVO_D1VGA_CONTROL, 0);
		WREG32(AVIVO_D1VGA_CONTROL, 0);
	else
	else
@@ -509,6 +648,9 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc,
		radeon_crtc_set_base(crtc, x, y, old_fb);
		radeon_crtc_set_base(crtc, x, y, old_fb);
		radeon_legacy_atom_set_surface(crtc);
		radeon_legacy_atom_set_surface(crtc);
	}
	}
	atombios_overscan_setup(crtc, mode, adjusted_mode);
	atombios_scaler_setup(crtc);
	radeon_bandwidth_update(rdev);
	return 0;
	return 0;
}
}


@@ -516,6 +658,8 @@ static bool atombios_crtc_mode_fixup(struct drm_crtc *crtc,
				     struct drm_display_mode *mode,
				     struct drm_display_mode *mode,
				     struct drm_display_mode *adjusted_mode)
				     struct drm_display_mode *adjusted_mode)
{
{
	if (!radeon_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
		return false;
	return true;
	return true;
}
}


@@ -548,148 +692,3 @@ void radeon_atombios_init_crtc(struct drm_device *dev,
		    AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL;
		    AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL;
	drm_crtc_helper_add(&radeon_crtc->base, &atombios_helper_funcs);
	drm_crtc_helper_add(&radeon_crtc->base, &atombios_helper_funcs);
}
}

/*
 * radeon_init_disp_bw_avivo - display-bandwidth setup for AVIVO chips.
 * @dev: DRM device
 * @mode1: mode on the first display controller, or NULL if unused
 * @pixel_bytes1: bytes per pixel for @mode1
 * @mode2: mode on the second display controller, or NULL if unused
 * @pixel_bytes2: bytes per pixel for @mode2
 *
 * Optionally raises the memory-controller display latency priority, warns
 * (via fixed-point 20.12 arithmetic) if the summed pixel clocks may exceed
 * available memory bandwidth, and partitions the shared line buffer between
 * the two display controllers via AVIVO_DC_LB_MEMORY_SPLIT.
 */
void radeon_init_disp_bw_avivo(struct drm_device *dev,
			       struct drm_display_mode *mode1,
			       uint32_t pixel_bytes1,
			       struct drm_display_mode *mode2,
			       uint32_t pixel_bytes2)
{
	struct radeon_device *rdev = dev->dev_private;
	fixed20_12 min_mem_eff;
	fixed20_12 peak_disp_bw, mem_bw, pix_clk, pix_clk2, temp_ff;
	fixed20_12 sclk_ff, mclk_ff;
	uint32_t dc_lb_memory_split, temp;

	/* assumed minimum memory efficiency factor (20.12 fixed point);
	 * NOTE(review): rfixed_const_8(0) presumably encodes 0.8 — confirm
	 * against the rfixed helpers */
	min_mem_eff.full = rfixed_const_8(0);
	/* disp_priority == 2: give the display controllers maximum latency
	 * priority in the memory controller (RV515/RS690 only) */
	if (rdev->disp_priority == 2) {
		uint32_t mc_init_misc_lat_timer = 0;
		if (rdev->family == CHIP_RV515)
			mc_init_misc_lat_timer =
			    RREG32_MC(RV515_MC_INIT_MISC_LAT_TIMER);
		else if (rdev->family == CHIP_RS690)
			mc_init_misc_lat_timer =
			    RREG32_MC(RS690_MC_INIT_MISC_LAT_TIMER);

		/* clear both displays' latency fields ... */
		mc_init_misc_lat_timer &=
		    ~(R300_MC_DISP1R_INIT_LAT_MASK <<
		      R300_MC_DISP1R_INIT_LAT_SHIFT);
		mc_init_misc_lat_timer &=
		    ~(R300_MC_DISP0R_INIT_LAT_MASK <<
		      R300_MC_DISP0R_INIT_LAT_SHIFT);

		/* ... then set latency 1 for each active display */
		if (mode2)
			mc_init_misc_lat_timer |=
			    (1 << R300_MC_DISP1R_INIT_LAT_SHIFT);
		if (mode1)
			mc_init_misc_lat_timer |=
			    (1 << R300_MC_DISP0R_INIT_LAT_SHIFT);

		if (rdev->family == CHIP_RV515)
			WREG32_MC(RV515_MC_INIT_MISC_LAT_TIMER,
				  mc_init_misc_lat_timer);
		else if (rdev->family == CHIP_RS690)
			WREG32_MC(RS690_MC_INIT_MISC_LAT_TIMER,
				  mc_init_misc_lat_timer);
	}

	/*
	 * determine if there is enough bw for current mode
	 */
	/* clocks are stored in units of 10 kHz; divide by 100 for MHz */
	temp_ff.full = rfixed_const(100);
	mclk_ff.full = rfixed_const(rdev->clock.default_mclk);
	mclk_ff.full = rfixed_div(mclk_ff, temp_ff);
	sclk_ff.full = rfixed_const(rdev->clock.default_sclk);
	sclk_ff.full = rfixed_div(sclk_ff, temp_ff);

	/* bytes transferred per memory clock: bus width in bytes, doubled
	 * for DDR */
	temp = (rdev->mc.vram_width / 8) * (rdev->mc.vram_is_ddr ? 2 : 1);
	temp_ff.full = rfixed_const(temp);
	mem_bw.full = rfixed_mul(mclk_ff, temp_ff);
	mem_bw.full = rfixed_mul(mem_bw, min_mem_eff);

	/* peak display bandwidth = sum over active heads of
	 * pixel_clock(MHz) * bytes_per_pixel */
	pix_clk.full = 0;
	pix_clk2.full = 0;
	peak_disp_bw.full = 0;
	if (mode1) {
		temp_ff.full = rfixed_const(1000);
		pix_clk.full = rfixed_const(mode1->clock);	/* convert to fixed point */
		pix_clk.full = rfixed_div(pix_clk, temp_ff);
		temp_ff.full = rfixed_const(pixel_bytes1);
		peak_disp_bw.full += rfixed_mul(pix_clk, temp_ff);
	}
	if (mode2) {
		temp_ff.full = rfixed_const(1000);
		pix_clk2.full = rfixed_const(mode2->clock);	/* convert to fixed point */
		pix_clk2.full = rfixed_div(pix_clk2, temp_ff);
		temp_ff.full = rfixed_const(pixel_bytes2);
		peak_disp_bw.full += rfixed_mul(pix_clk2, temp_ff);
	}

	/* warn only — the mode is still programmed even if bandwidth looks
	 * insufficient */
	if (peak_disp_bw.full >= mem_bw.full) {
		DRM_ERROR
		    ("You may not have enough display bandwidth for current mode\n"
		     "If you have flickering problem, try to lower resolution, refresh rate, or color depth\n");
		printk("peak disp bw %d, mem_bw %d\n",
		       rfixed_trunc(peak_disp_bw), rfixed_trunc(mem_bw));
	}

	/*
	 * Line Buffer Setup
	 * There is a single line buffer shared by both display controllers.
	 * DC_LB_MEMORY_SPLIT controls how that line buffer is shared between the display
	 * controllers.  The partitioning can either be done manually or via one of four
	 * preset allocations specified in bits 1:0:
	 * 0 - line buffer is divided in half and shared between each display controller
	 * 1 - D1 gets 3/4 of the line buffer, D2 gets 1/4
	 * 2 - D1 gets the whole buffer
	 * 3 - D1 gets 1/4 of the line buffer, D2 gets 3/4
	 * Setting bit 2 of DC_LB_MEMORY_SPLIT controls switches to manual allocation mode.
	 * In manual allocation mode, D1 always starts at 0, D1 end/2 is specified in bits
	 * 14:4; D2 allocation follows D1.
	 */

	/* is auto or manual better ? */
	dc_lb_memory_split =
	    RREG32(AVIVO_DC_LB_MEMORY_SPLIT) & ~AVIVO_DC_LB_MEMORY_SPLIT_MASK;
	dc_lb_memory_split &= ~AVIVO_DC_LB_MEMORY_SPLIT_SHIFT_MODE;
#if 1
	/* auto */
	/* preset split: give the wider head 3/4 of the buffer when it is
	 * very wide (> 2560), otherwise split evenly */
	if (mode1 && mode2) {
		if (mode1->hdisplay > mode2->hdisplay) {
			if (mode1->hdisplay > 2560)
				dc_lb_memory_split |=
				    AVIVO_DC_LB_MEMORY_SPLIT_D1_3Q_D2_1Q;
			else
				dc_lb_memory_split |=
				    AVIVO_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF;
		} else if (mode2->hdisplay > mode1->hdisplay) {
			if (mode2->hdisplay > 2560)
				dc_lb_memory_split |=
				    AVIVO_DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q;
			else
				dc_lb_memory_split |=
				    AVIVO_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF;
		} else
			dc_lb_memory_split |=
			    AVIVO_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF;
	} else if (mode1) {
		dc_lb_memory_split |= AVIVO_DC_LB_MEMORY_SPLIT_D1_ONLY;
	} else if (mode2) {
		dc_lb_memory_split |= AVIVO_DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q;
	}
#else
	/* manual */
	dc_lb_memory_split |= AVIVO_DC_LB_MEMORY_SPLIT_SHIFT_MODE;
	dc_lb_memory_split &=
	    ~(AVIVO_DC_LB_DISP1_END_ADR_MASK <<
	      AVIVO_DC_LB_DISP1_END_ADR_SHIFT);
	if (mode1) {
		dc_lb_memory_split |=
		    ((((mode1->hdisplay / 2) + 64) & AVIVO_DC_LB_DISP1_END_ADR_MASK)
		     << AVIVO_DC_LB_DISP1_END_ADR_SHIFT);
	} else if (mode2) {
		dc_lb_memory_split |= (0 << AVIVO_DC_LB_DISP1_END_ADR_SHIFT);
	}
#endif
	WREG32(AVIVO_DC_LB_MEMORY_SPLIT, dc_lb_memory_split);
}
+757 −13

File changed.

Preview size limit exceeded, changes collapsed.

+70 −8
Original line number Original line Diff line number Diff line
@@ -30,6 +30,8 @@
#include "drm.h"
#include "drm.h"
#include "radeon_reg.h"
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon.h"
#include "radeon_drm.h"
#include "radeon_share.h"


/* r300,r350,rv350,rv370,rv380 depends on : */
/* r300,r350,rv350,rv370,rv380 depends on : */
void r100_hdp_reset(struct radeon_device *rdev);
void r100_hdp_reset(struct radeon_device *rdev);
@@ -44,6 +46,7 @@ int r100_gui_wait_for_idle(struct radeon_device *rdev);
int r100_cs_packet_parse(struct radeon_cs_parser *p,
int r100_cs_packet_parse(struct radeon_cs_parser *p,
			 struct radeon_cs_packet *pkt,
			 struct radeon_cs_packet *pkt,
			 unsigned idx);
			 unsigned idx);
int r100_cs_packet_parse_vline(struct radeon_cs_parser *p);
int r100_cs_packet_next_reloc(struct radeon_cs_parser *p,
int r100_cs_packet_next_reloc(struct radeon_cs_parser *p,
			      struct radeon_cs_reloc **cs_reloc);
			      struct radeon_cs_reloc **cs_reloc);
int r100_cs_parse_packet0(struct radeon_cs_parser *p,
int r100_cs_parse_packet0(struct radeon_cs_parser *p,
@@ -150,8 +153,13 @@ int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
	if (i < 0 || i > rdev->gart.num_gpu_pages) {
	if (i < 0 || i > rdev->gart.num_gpu_pages) {
		return -EINVAL;
		return -EINVAL;
	}
	}
	addr = (((u32)addr) >> 8) | ((upper_32_bits(addr) & 0xff) << 4) | 0xC;
	addr = (lower_32_bits(addr) >> 8) |
	writel(cpu_to_le32(addr), ((void __iomem *)ptr) + (i * 4));
	       ((upper_32_bits(addr) & 0xff) << 24) |
	       0xc;
	/* on x86 we want this to be CPU endian, on powerpc
	 * on powerpc without HW swappers, it'll get swapped on way
	 * into VRAM - so no need for cpu_to_le32 on VRAM tables */
	writel(addr, ((void __iomem *)ptr) + (i * 4));
	return 0;
	return 0;
}
}


@@ -579,10 +587,8 @@ void r300_vram_info(struct radeon_device *rdev)
	} else {
	} else {
		rdev->mc.vram_width = 64;
		rdev->mc.vram_width = 64;
	}
	}
	rdev->mc.vram_size = RREG32(RADEON_CONFIG_MEMSIZE);


	rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
	r100_vram_init_sizes(rdev);
	rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
}
}




@@ -970,7 +976,7 @@ static inline void r300_cs_track_clear(struct r300_cs_track *track)


static const unsigned r300_reg_safe_bm[159] = {
static const unsigned r300_reg_safe_bm[159] = {
	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
	0xFFFFFFBF, 0xFFFFFFFF, 0xFFFFFFBF, 0xFFFFFFFF,
	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
@@ -1019,7 +1025,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
	struct radeon_cs_reloc *reloc;
	struct radeon_cs_reloc *reloc;
	struct r300_cs_track *track;
	struct r300_cs_track *track;
	volatile uint32_t *ib;
	volatile uint32_t *ib;
	uint32_t tmp;
	uint32_t tmp, tile_flags = 0;
	unsigned i;
	unsigned i;
	int r;
	int r;


@@ -1027,6 +1033,16 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
	ib_chunk = &p->chunks[p->chunk_ib_idx];
	ib_chunk = &p->chunks[p->chunk_ib_idx];
	track = (struct r300_cs_track*)p->track;
	track = (struct r300_cs_track*)p->track;
	switch(reg) {
	switch(reg) {
	case AVIVO_D1MODE_VLINE_START_END:
	case RADEON_CRTC_GUI_TRIG_VLINE:
		r = r100_cs_packet_parse_vline(p);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
					idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		break;
	case RADEON_DST_PITCH_OFFSET:
	case RADEON_DST_PITCH_OFFSET:
	case RADEON_SRC_PITCH_OFFSET:
	case RADEON_SRC_PITCH_OFFSET:
		r = r100_cs_packet_next_reloc(p, &reloc);
		r = r100_cs_packet_next_reloc(p, &reloc);
@@ -1038,7 +1054,19 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
		}
		}
		tmp = ib_chunk->kdata[idx] & 0x003fffff;
		tmp = ib_chunk->kdata[idx] & 0x003fffff;
		tmp += (((u32)reloc->lobj.gpu_offset) >> 10);
		tmp += (((u32)reloc->lobj.gpu_offset) >> 10);
		ib[idx] = (ib_chunk->kdata[idx] & 0xffc00000) | tmp;

		if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
			tile_flags |= RADEON_DST_TILE_MACRO;
		if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
			if (reg == RADEON_SRC_PITCH_OFFSET) {
				DRM_ERROR("Cannot src blit from microtiled surface\n");
				r100_cs_dump_packet(p, pkt);
				return -EINVAL;
			}
			tile_flags |= RADEON_DST_TILE_MICRO;
		}
		tmp |= tile_flags;
		ib[idx] = (ib_chunk->kdata[idx] & 0x3fc00000) | tmp;
		break;
		break;
	case R300_RB3D_COLOROFFSET0:
	case R300_RB3D_COLOROFFSET0:
	case R300_RB3D_COLOROFFSET1:
	case R300_RB3D_COLOROFFSET1:
@@ -1127,6 +1155,23 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
		/* RB3D_COLORPITCH1 */
		/* RB3D_COLORPITCH1 */
		/* RB3D_COLORPITCH2 */
		/* RB3D_COLORPITCH2 */
		/* RB3D_COLORPITCH3 */
		/* RB3D_COLORPITCH3 */
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}

		if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
			tile_flags |= R300_COLOR_TILE_ENABLE;
		if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
			tile_flags |= R300_COLOR_MICROTILE_ENABLE;

		tmp = ib_chunk->kdata[idx] & ~(0x7 << 16);
		tmp |= tile_flags;
		ib[idx] = tmp;

		i = (reg - 0x4E38) >> 2;
		i = (reg - 0x4E38) >> 2;
		track->cb[i].pitch = ib_chunk->kdata[idx] & 0x3FFE;
		track->cb[i].pitch = ib_chunk->kdata[idx] & 0x3FFE;
		switch (((ib_chunk->kdata[idx] >> 21) & 0xF)) {
		switch (((ib_chunk->kdata[idx] >> 21) & 0xF)) {
@@ -1182,6 +1227,23 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
		break;
		break;
	case 0x4F24:
	case 0x4F24:
		/* ZB_DEPTHPITCH */
		/* ZB_DEPTHPITCH */
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}

		if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
			tile_flags |= R300_DEPTHMACROTILE_ENABLE;
		if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
			tile_flags |= R300_DEPTHMICROTILE_TILED;;

		tmp = ib_chunk->kdata[idx] & ~(0x7 << 16);
		tmp |= tile_flags;
		ib[idx] = tmp;

		track->zb.pitch = ib_chunk->kdata[idx] & 0x3FFC;
		track->zb.pitch = ib_chunk->kdata[idx] & 0x3FFC;
		break;
		break;
	case 0x4104:
	case 0x4104:
Loading