Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 66fd7a66 authored by Dave Airlie's avatar Dave Airlie
Browse files

Merge branch 'drm-intel-next' of git://anongit.freedesktop.org/drm-intel into drm-next

drm-intel-next-2016-05-22:
- cmd-parser support for direct reg->reg loads (Ken Graunke)
- better handle DP++ smart dongles (Ville)
- bxt guc fw loading support (Nick Hoathe)
- remove a bunch of struct typedefs from dpll code (Ander)
- tons of small work all over to avoid casting between drm_device and the i915
  dev struct (Tvrtko&Chris)
- untangle request retiring from other operations, also fixes reset stat corner
  cases (Chris)
- skl atomic watermark support from Matt Roper, yay!
- various wm handling bugfixes from Ville
- big pile of cdclk rework for bxt/skl (Ville)
- CABC (Content Adaptive Brightness Control) for dsi panels (Jani&Deepak M)
- nonblocking atomic commits for plane-only updates (Maarten Lankhorst)
- bunch of PSR fixes&improvements
- untangle our map/pin/sg_iter code a bit (Dave Gordon)
drm-intel-next-2016-05-08:
- refactor stolen quirks to share code between early quirks and i915 (Joonas)
- refactor gem BO/vma functions (Tvrtko&Dave)
- backlight over DPCD support (Yetunde Abedisi)
- more dsi panel sequence support (Jani)
- lots of refactoring around handling iomaps, vma, ring access and related
  topics culminating in removing the duplicated request tracking in the execlist
  code (Chris & Tvrtko) includes a small patch for core iomapping code
- hw state readout for bxt dsi (Ramalingam C)
- cdclk cleanups (Ville)
- dedupe chv pll code a bit (Ander)
- enable semaphores on gen8+ for legacy submission, to be able to have a direct
  comparison against execlist on the same platform (Chris) Not meant to be used
  for anything else but performance tuning
- lvds border bit hw state checker fix (Jani)
- rpm vs. shrinker/oom-notifier fixes (Praveen Paneri)
- l3 tuning (Imre)
- revert mst dp audio, it's totally non-functional and crash-y (Lyude)
- first official dmc for kbl (Rodrigo)
- and tons of small things all over as usual

* 'drm-intel-next' of git://anongit.freedesktop.org/drm-intel: (194 commits)
  drm/i915: Revert async unpin and nonblocking atomic commit
  drm/i915: Update DRIVER_DATE to 20160522
  drm/i915: Inline sg_next() for the optimised SGL iterator
  drm/i915: Introduce & use new lightweight SGL iterators
  drm/i915: optimise i915_gem_object_map() for small objects
  drm/i915: refactor i915_gem_object_pin_map()
  drm/i915/psr: Implement PSR2 w/a for gen9
  drm/i915/psr: Use ->get_aux_send_ctl functions
  drm/i915/psr: Order DP aux transactions correctly
  drm/i915/psr: Make idle_frames sensible again
  drm/i915/psr: Try to program link training times correctly
  drm/i915/userptr: Convert to drm_i915_private
  drm/i915: Allow nonblocking update of pageflips.
  drm/i915: Check for unpin correctness.
  Reapply "drm/i915: Avoid stalling on pending flips for legacy cursor updates"
  drm/i915: Make unpin async.
  drm/i915: Prepare connectors for nonblocking checks.
  drm/i915: Pass atomic states to fbc update functions.
  drm/i915: Remove reset_counter from intel_crtc.
  drm/i915: Remove queue_flip pointer.
  ...
parents 65439b68 e42aeef1
Loading
Loading
Loading
Loading
+194 −210
Original line number Diff line number Diff line
@@ -223,36 +223,19 @@ static void __init intel_remapping_check(int num, int slot, int func)
 * despite the efforts of the "RAM buffer" approach, which simply rounds
 * memory boundaries up to 64M to try to catch space that may decode
 * as RAM and so is not suitable for MMIO.
 *
 * And yes, so far on current devices the base addr is always under 4G.
 */
static u32 __init intel_stolen_base(int num, int slot, int func, size_t stolen_size)
{
	u32 base;

	/*
	 * For the PCI IDs in this quirk, the stolen base is always
	 * in 0x5c, aka the BDSM register (yes that's really what
	 * it's called).
 */
	base = read_pci_config(num, slot, func, 0x5c);
	base &= ~((1<<20) - 1);

	return base;
}

#define KB(x)	((x) * 1024UL)
#define MB(x)	(KB (KB (x)))
#define GB(x)	(MB (KB (x)))

static size_t __init i830_tseg_size(void)
{
	u8 tmp = read_pci_config_byte(0, 0, 0, I830_ESMRAMC);
	u8 esmramc = read_pci_config_byte(0, 0, 0, I830_ESMRAMC);

	if (!(tmp & TSEG_ENABLE))
	if (!(esmramc & TSEG_ENABLE))
		return 0;

	if (tmp & I830_TSEG_SIZE_1M)
	if (esmramc & I830_TSEG_SIZE_1M)
		return MB(1);
	else
		return KB(512);
@@ -260,27 +243,26 @@ static size_t __init i830_tseg_size(void)

static size_t __init i845_tseg_size(void)
{
	u8 tmp = read_pci_config_byte(0, 0, 0, I845_ESMRAMC);
	u8 esmramc = read_pci_config_byte(0, 0, 0, I845_ESMRAMC);
	u8 tseg_size = esmramc & I845_TSEG_SIZE_MASK;

	if (!(tmp & TSEG_ENABLE))
	if (!(esmramc & TSEG_ENABLE))
		return 0;

	switch (tmp & I845_TSEG_SIZE_MASK) {
	case I845_TSEG_SIZE_512K:
		return KB(512);
	case I845_TSEG_SIZE_1M:
		return MB(1);
	switch (tseg_size) {
	case I845_TSEG_SIZE_512K:	return KB(512);
	case I845_TSEG_SIZE_1M:		return MB(1);
	default:
		WARN_ON(1);
		return 0;
		WARN(1, "Unknown ESMRAMC value: %x!\n", esmramc);
	}
	return 0;
}

static size_t __init i85x_tseg_size(void)
{
	u8 tmp = read_pci_config_byte(0, 0, 0, I85X_ESMRAMC);
	u8 esmramc = read_pci_config_byte(0, 0, 0, I85X_ESMRAMC);

	if (!(tmp & TSEG_ENABLE))
	if (!(esmramc & TSEG_ENABLE))
		return 0;

	return MB(1);
@@ -300,287 +282,289 @@ static size_t __init i85x_mem_size(void)
 * On 830/845/85x the stolen memory base isn't available in any
 * register. We need to calculate it as TOM-TSEG_SIZE-stolen_size.
 */
static u32 __init i830_stolen_base(int num, int slot, int func, size_t stolen_size)
static phys_addr_t __init i830_stolen_base(int num, int slot, int func,
					   size_t stolen_size)
{
	return i830_mem_size() - i830_tseg_size() - stolen_size;
	return (phys_addr_t)i830_mem_size() - i830_tseg_size() - stolen_size;
}

static u32 __init i845_stolen_base(int num, int slot, int func, size_t stolen_size)
static phys_addr_t __init i845_stolen_base(int num, int slot, int func,
					   size_t stolen_size)
{
	return i830_mem_size() - i845_tseg_size() - stolen_size;
	return (phys_addr_t)i830_mem_size() - i845_tseg_size() - stolen_size;
}

static u32 __init i85x_stolen_base(int num, int slot, int func, size_t stolen_size)
static phys_addr_t __init i85x_stolen_base(int num, int slot, int func,
					   size_t stolen_size)
{
	return i85x_mem_size() - i85x_tseg_size() - stolen_size;
	return (phys_addr_t)i85x_mem_size() - i85x_tseg_size() - stolen_size;
}

static u32 __init i865_stolen_base(int num, int slot, int func, size_t stolen_size)
static phys_addr_t __init i865_stolen_base(int num, int slot, int func,
					   size_t stolen_size)
{
	u16 toud;

	/*
	 * FIXME is the graphics stolen memory region
	 * always at TOUD? Ie. is it always the last
	 * one to be allocated by the BIOS?
	 */
	return read_pci_config_16(0, 0, 0, I865_TOUD) << 16;
	toud = read_pci_config_16(0, 0, 0, I865_TOUD);

	return (phys_addr_t)toud << 16;
}

/*
 * Return the physical base address of graphics stolen memory for gen3+
 * platforms by reading the BSM register from the igfx PCI config space.
 *
 * @stolen_size is unused here; it is part of the signature because this
 * function is installed as the common ->stolen_base callback in the
 * per-platform intel_early_ops tables, alongside variants that do need
 * the size to compute the base.
 */
static phys_addr_t __init gen3_stolen_base(int num, int slot, int func,
					   size_t stolen_size)
{
	u32 bsm;

	/* Almost universally we can find the Graphics Base of Stolen Memory
	 * at register BSM (0x5c) in the igfx configuration space. On a few
	 * (desktop) machines this is also mirrored in the bridge device at
	 * different locations, or in the MCHBAR.
	 */
	bsm = read_pci_config(num, slot, func, INTEL_BSM);

	/* Mask off the low control/reserved bits to get the aligned base. */
	return (phys_addr_t)bsm & INTEL_BSM_MASK;
}

static size_t __init i830_stolen_size(int num, int slot, int func)
{
	size_t stolen_size;
	u16 gmch_ctrl;
	u16 gms;

	gmch_ctrl = read_pci_config_16(0, 0, 0, I830_GMCH_CTRL);
	gms = gmch_ctrl & I830_GMCH_GMS_MASK;

	switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
	case I830_GMCH_GMS_STOLEN_512:
		stolen_size = KB(512);
		break;
	case I830_GMCH_GMS_STOLEN_1024:
		stolen_size = MB(1);
		break;
	case I830_GMCH_GMS_STOLEN_8192:
		stolen_size = MB(8);
		break;
	case I830_GMCH_GMS_LOCAL:
	switch (gms) {
	case I830_GMCH_GMS_STOLEN_512:	return KB(512);
	case I830_GMCH_GMS_STOLEN_1024:	return MB(1);
	case I830_GMCH_GMS_STOLEN_8192:	return MB(8);
	/* local memory isn't part of the normal address space */
		stolen_size = 0;
		break;
	case I830_GMCH_GMS_LOCAL:	return 0;
	default:
		return 0;
		WARN(1, "Unknown GMCH_CTRL value: %x!\n", gmch_ctrl);
	}

	return stolen_size;
	return 0;
}

static size_t __init gen3_stolen_size(int num, int slot, int func)
{
	size_t stolen_size;
	u16 gmch_ctrl;
	u16 gms;

	gmch_ctrl = read_pci_config_16(0, 0, 0, I830_GMCH_CTRL);

	switch (gmch_ctrl & I855_GMCH_GMS_MASK) {
	case I855_GMCH_GMS_STOLEN_1M:
		stolen_size = MB(1);
		break;
	case I855_GMCH_GMS_STOLEN_4M:
		stolen_size = MB(4);
		break;
	case I855_GMCH_GMS_STOLEN_8M:
		stolen_size = MB(8);
		break;
	case I855_GMCH_GMS_STOLEN_16M:
		stolen_size = MB(16);
		break;
	case I855_GMCH_GMS_STOLEN_32M:
		stolen_size = MB(32);
		break;
	case I915_GMCH_GMS_STOLEN_48M:
		stolen_size = MB(48);
		break;
	case I915_GMCH_GMS_STOLEN_64M:
		stolen_size = MB(64);
		break;
	case G33_GMCH_GMS_STOLEN_128M:
		stolen_size = MB(128);
		break;
	case G33_GMCH_GMS_STOLEN_256M:
		stolen_size = MB(256);
		break;
	case INTEL_GMCH_GMS_STOLEN_96M:
		stolen_size = MB(96);
		break;
	case INTEL_GMCH_GMS_STOLEN_160M:
		stolen_size = MB(160);
		break;
	case INTEL_GMCH_GMS_STOLEN_224M:
		stolen_size = MB(224);
		break;
	case INTEL_GMCH_GMS_STOLEN_352M:
		stolen_size = MB(352);
		break;
	gms = gmch_ctrl & I855_GMCH_GMS_MASK;

	switch (gms) {
	case I855_GMCH_GMS_STOLEN_1M:	return MB(1);
	case I855_GMCH_GMS_STOLEN_4M:	return MB(4);
	case I855_GMCH_GMS_STOLEN_8M:	return MB(8);
	case I855_GMCH_GMS_STOLEN_16M:	return MB(16);
	case I855_GMCH_GMS_STOLEN_32M:	return MB(32);
	case I915_GMCH_GMS_STOLEN_48M:	return MB(48);
	case I915_GMCH_GMS_STOLEN_64M:	return MB(64);
	case G33_GMCH_GMS_STOLEN_128M:	return MB(128);
	case G33_GMCH_GMS_STOLEN_256M:	return MB(256);
	case INTEL_GMCH_GMS_STOLEN_96M:	return MB(96);
	case INTEL_GMCH_GMS_STOLEN_160M:return MB(160);
	case INTEL_GMCH_GMS_STOLEN_224M:return MB(224);
	case INTEL_GMCH_GMS_STOLEN_352M:return MB(352);
	default:
		stolen_size = 0;
		break;
		WARN(1, "Unknown GMCH_CTRL value: %x!\n", gmch_ctrl);
	}

	return stolen_size;
	return 0;
}

static size_t __init gen6_stolen_size(int num, int slot, int func)
{
	u16 gmch_ctrl;
	u16 gms;

	gmch_ctrl = read_pci_config_16(num, slot, func, SNB_GMCH_CTRL);
	gmch_ctrl >>= SNB_GMCH_GMS_SHIFT;
	gmch_ctrl &= SNB_GMCH_GMS_MASK;
	gms = (gmch_ctrl >> SNB_GMCH_GMS_SHIFT) & SNB_GMCH_GMS_MASK;

	return gmch_ctrl << 25; /* 32 MB units */
	return (size_t)gms * MB(32);
}

static size_t __init gen8_stolen_size(int num, int slot, int func)
{
	u16 gmch_ctrl;
	u16 gms;

	gmch_ctrl = read_pci_config_16(num, slot, func, SNB_GMCH_CTRL);
	gmch_ctrl >>= BDW_GMCH_GMS_SHIFT;
	gmch_ctrl &= BDW_GMCH_GMS_MASK;
	return gmch_ctrl << 25; /* 32 MB units */
	gms = (gmch_ctrl >> BDW_GMCH_GMS_SHIFT) & BDW_GMCH_GMS_MASK;

	return (size_t)gms * MB(32);
}

static size_t __init chv_stolen_size(int num, int slot, int func)
{
	u16 gmch_ctrl;
	u16 gms;

	gmch_ctrl = read_pci_config_16(num, slot, func, SNB_GMCH_CTRL);
	gmch_ctrl >>= SNB_GMCH_GMS_SHIFT;
	gmch_ctrl &= SNB_GMCH_GMS_MASK;
	gms = (gmch_ctrl >> SNB_GMCH_GMS_SHIFT) & SNB_GMCH_GMS_MASK;

	/*
	 * 0x0  to 0x10: 32MB increments starting at 0MB
	 * 0x11 to 0x16: 4MB increments starting at 8MB
	 * 0x17 to 0x1d: 4MB increments start at 36MB
	 */
	if (gmch_ctrl < 0x11)
		return gmch_ctrl << 25;
	else if (gmch_ctrl < 0x17)
		return (gmch_ctrl - 0x11 + 2) << 22;
	if (gms < 0x11)
		return (size_t)gms * MB(32);
	else if (gms < 0x17)
		return (size_t)(gms - 0x11 + 2) * MB(4);
	else
		return (gmch_ctrl - 0x17 + 9) << 22;
		return (size_t)(gms - 0x17 + 9) * MB(4);
}

struct intel_stolen_funcs {
	size_t (*size)(int num, int slot, int func);
	u32 (*base)(int num, int slot, int func, size_t size);
};

static size_t __init gen9_stolen_size(int num, int slot, int func)
{
	u16 gmch_ctrl;
	u16 gms;

	gmch_ctrl = read_pci_config_16(num, slot, func, SNB_GMCH_CTRL);
	gmch_ctrl >>= BDW_GMCH_GMS_SHIFT;
	gmch_ctrl &= BDW_GMCH_GMS_MASK;
	gms = (gmch_ctrl >> BDW_GMCH_GMS_SHIFT) & BDW_GMCH_GMS_MASK;

	if (gmch_ctrl < 0xf0)
		return gmch_ctrl << 25; /* 32 MB units */
	/* 0x0  to 0xef: 32MB increments starting at 0MB */
	/* 0xf0 to 0xfe: 4MB increments starting at 4MB */
	if (gms < 0xf0)
		return (size_t)gms * MB(32);
	else
		/* 4MB increments starting at 0xf0 for 4MB */
		return (gmch_ctrl - 0xf0 + 1) << 22;
		return (size_t)(gms - 0xf0 + 1) * MB(4);
}

typedef size_t (*stolen_size_fn)(int num, int slot, int func);
struct intel_early_ops {
	size_t (*stolen_size)(int num, int slot, int func);
	phys_addr_t (*stolen_base)(int num, int slot, int func, size_t size);
};

static const struct intel_stolen_funcs i830_stolen_funcs __initconst = {
	.base = i830_stolen_base,
	.size = i830_stolen_size,
static const struct intel_early_ops i830_early_ops __initconst = {
	.stolen_base = i830_stolen_base,
	.stolen_size = i830_stolen_size,
};

static const struct intel_stolen_funcs i845_stolen_funcs __initconst = {
	.base = i845_stolen_base,
	.size = i830_stolen_size,
static const struct intel_early_ops i845_early_ops __initconst = {
	.stolen_base = i845_stolen_base,
	.stolen_size = i830_stolen_size,
};

static const struct intel_stolen_funcs i85x_stolen_funcs __initconst = {
	.base = i85x_stolen_base,
	.size = gen3_stolen_size,
static const struct intel_early_ops i85x_early_ops __initconst = {
	.stolen_base = i85x_stolen_base,
	.stolen_size = gen3_stolen_size,
};

static const struct intel_stolen_funcs i865_stolen_funcs __initconst = {
	.base = i865_stolen_base,
	.size = gen3_stolen_size,
static const struct intel_early_ops i865_early_ops __initconst = {
	.stolen_base = i865_stolen_base,
	.stolen_size = gen3_stolen_size,
};

static const struct intel_stolen_funcs gen3_stolen_funcs __initconst = {
	.base = intel_stolen_base,
	.size = gen3_stolen_size,
static const struct intel_early_ops gen3_early_ops __initconst = {
	.stolen_base = gen3_stolen_base,
	.stolen_size = gen3_stolen_size,
};

static const struct intel_stolen_funcs gen6_stolen_funcs __initconst = {
	.base = intel_stolen_base,
	.size = gen6_stolen_size,
static const struct intel_early_ops gen6_early_ops __initconst = {
	.stolen_base = gen3_stolen_base,
	.stolen_size = gen6_stolen_size,
};

static const struct intel_stolen_funcs gen8_stolen_funcs __initconst = {
	.base = intel_stolen_base,
	.size = gen8_stolen_size,
static const struct intel_early_ops gen8_early_ops __initconst = {
	.stolen_base = gen3_stolen_base,
	.stolen_size = gen8_stolen_size,
};

static const struct intel_stolen_funcs gen9_stolen_funcs __initconst = {
	.base = intel_stolen_base,
	.size = gen9_stolen_size,
static const struct intel_early_ops gen9_early_ops __initconst = {
	.stolen_base = gen3_stolen_base,
	.stolen_size = gen9_stolen_size,
};

static const struct intel_stolen_funcs chv_stolen_funcs __initconst = {
	.base = intel_stolen_base,
	.size = chv_stolen_size,
static const struct intel_early_ops chv_early_ops __initconst = {
	.stolen_base = gen3_stolen_base,
	.stolen_size = chv_stolen_size,
};

static const struct pci_device_id intel_stolen_ids[] __initconst = {
	INTEL_I830_IDS(&i830_stolen_funcs),
	INTEL_I845G_IDS(&i845_stolen_funcs),
	INTEL_I85X_IDS(&i85x_stolen_funcs),
	INTEL_I865G_IDS(&i865_stolen_funcs),
	INTEL_I915G_IDS(&gen3_stolen_funcs),
	INTEL_I915GM_IDS(&gen3_stolen_funcs),
	INTEL_I945G_IDS(&gen3_stolen_funcs),
	INTEL_I945GM_IDS(&gen3_stolen_funcs),
	INTEL_VLV_M_IDS(&gen6_stolen_funcs),
	INTEL_VLV_D_IDS(&gen6_stolen_funcs),
	INTEL_PINEVIEW_IDS(&gen3_stolen_funcs),
	INTEL_I965G_IDS(&gen3_stolen_funcs),
	INTEL_G33_IDS(&gen3_stolen_funcs),
	INTEL_I965GM_IDS(&gen3_stolen_funcs),
	INTEL_GM45_IDS(&gen3_stolen_funcs),
	INTEL_G45_IDS(&gen3_stolen_funcs),
	INTEL_IRONLAKE_D_IDS(&gen3_stolen_funcs),
	INTEL_IRONLAKE_M_IDS(&gen3_stolen_funcs),
	INTEL_SNB_D_IDS(&gen6_stolen_funcs),
	INTEL_SNB_M_IDS(&gen6_stolen_funcs),
	INTEL_IVB_M_IDS(&gen6_stolen_funcs),
	INTEL_IVB_D_IDS(&gen6_stolen_funcs),
	INTEL_HSW_D_IDS(&gen6_stolen_funcs),
	INTEL_HSW_M_IDS(&gen6_stolen_funcs),
	INTEL_BDW_M_IDS(&gen8_stolen_funcs),
	INTEL_BDW_D_IDS(&gen8_stolen_funcs),
	INTEL_CHV_IDS(&chv_stolen_funcs),
	INTEL_SKL_IDS(&gen9_stolen_funcs),
	INTEL_BXT_IDS(&gen9_stolen_funcs),
	INTEL_KBL_IDS(&gen9_stolen_funcs),
static const struct pci_device_id intel_early_ids[] __initconst = {
	INTEL_I830_IDS(&i830_early_ops),
	INTEL_I845G_IDS(&i845_early_ops),
	INTEL_I85X_IDS(&i85x_early_ops),
	INTEL_I865G_IDS(&i865_early_ops),
	INTEL_I915G_IDS(&gen3_early_ops),
	INTEL_I915GM_IDS(&gen3_early_ops),
	INTEL_I945G_IDS(&gen3_early_ops),
	INTEL_I945GM_IDS(&gen3_early_ops),
	INTEL_VLV_M_IDS(&gen6_early_ops),
	INTEL_VLV_D_IDS(&gen6_early_ops),
	INTEL_PINEVIEW_IDS(&gen3_early_ops),
	INTEL_I965G_IDS(&gen3_early_ops),
	INTEL_G33_IDS(&gen3_early_ops),
	INTEL_I965GM_IDS(&gen3_early_ops),
	INTEL_GM45_IDS(&gen3_early_ops),
	INTEL_G45_IDS(&gen3_early_ops),
	INTEL_IRONLAKE_D_IDS(&gen3_early_ops),
	INTEL_IRONLAKE_M_IDS(&gen3_early_ops),
	INTEL_SNB_D_IDS(&gen6_early_ops),
	INTEL_SNB_M_IDS(&gen6_early_ops),
	INTEL_IVB_M_IDS(&gen6_early_ops),
	INTEL_IVB_D_IDS(&gen6_early_ops),
	INTEL_HSW_D_IDS(&gen6_early_ops),
	INTEL_HSW_M_IDS(&gen6_early_ops),
	INTEL_BDW_M_IDS(&gen8_early_ops),
	INTEL_BDW_D_IDS(&gen8_early_ops),
	INTEL_CHV_IDS(&chv_early_ops),
	INTEL_SKL_IDS(&gen9_early_ops),
	INTEL_BXT_IDS(&gen9_early_ops),
	INTEL_KBL_IDS(&gen9_early_ops),
};

static void __init intel_graphics_stolen(int num, int slot, int func)
static void __init
intel_graphics_stolen(int num, int slot, int func,
		      const struct intel_early_ops *early_ops)
{
	phys_addr_t base, end;
	size_t size;
	int i;
	u32 start;
	u16 device, subvendor, subdevice;

	device = read_pci_config_16(num, slot, func, PCI_DEVICE_ID);
	subvendor = read_pci_config_16(num, slot, func,
				       PCI_SUBSYSTEM_VENDOR_ID);
	subdevice = read_pci_config_16(num, slot, func, PCI_SUBSYSTEM_ID);

	for (i = 0; i < ARRAY_SIZE(intel_stolen_ids); i++) {
		if (intel_stolen_ids[i].device == device) {
			const struct intel_stolen_funcs *stolen_funcs =
				(const struct intel_stolen_funcs *)intel_stolen_ids[i].driver_data;
			size = stolen_funcs->size(num, slot, func);
			start = stolen_funcs->base(num, slot, func, size);
			if (size && start) {
				printk(KERN_INFO "Reserving Intel graphics stolen memory at 0x%x-0x%x\n",
				       start, start + (u32)size - 1);
	size = early_ops->stolen_size(num, slot, func);
	base = early_ops->stolen_base(num, slot, func, size);

	if (!size || !base)
		return;

	end = base + size - 1;
	printk(KERN_INFO "Reserving Intel graphics memory at %pa-%pa\n",
	       &base, &end);

	/* Mark this space as reserved */
				e820_add_region(start, size, E820_RESERVED);
				sanitize_e820_map(e820.map,
						  ARRAY_SIZE(e820.map),
						  &e820.nr_map);
	e820_add_region(base, size, E820_RESERVED);
	sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
}

static void __init intel_graphics_quirks(int num, int slot, int func)
{
	const struct intel_early_ops *early_ops;
	u16 device;
	int i;

	device = read_pci_config_16(num, slot, func, PCI_DEVICE_ID);

	for (i = 0; i < ARRAY_SIZE(intel_early_ids); i++) {
		kernel_ulong_t driver_data = intel_early_ids[i].driver_data;

		if (intel_early_ids[i].device != device)
			continue;

		early_ops = (typeof(early_ops))driver_data;

		intel_graphics_stolen(num, slot, func, early_ops);

		return;
	}
}
}

static void __init force_disable_hpet(int num, int slot, int func)
{
@@ -627,7 +611,7 @@ static struct chipset early_qrk[] __initdata = {
	{ PCI_VENDOR_ID_INTEL, 0x3406, PCI_CLASS_BRIDGE_HOST,
	  PCI_BASE_CLASS_BRIDGE, 0, intel_remapping_check },
	{ PCI_VENDOR_ID_INTEL, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA, PCI_ANY_ID,
	  QFLAG_APPLY_ONCE, intel_graphics_stolen },
	  QFLAG_APPLY_ONCE, intel_graphics_quirks },
	/*
	 * HPET on the current version of the Baytrail platform has accuracy
	 * problems: it will halt in deep idle state - so we disable it.
+31 −0
Original line number Diff line number Diff line
@@ -242,6 +242,37 @@ static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe,
	store_vblank(dev, pipe, diff, &t_vblank, cur_vblank);
}

/**
 * drm_accurate_vblank_count - retrieve the master vblank counter
 * @crtc: which counter to retrieve
 *
 * This function is similar to drm_crtc_vblank_count() but this
 * function interpolates to handle a race with vblank irq's.
 *
 * This is mostly useful for hardware that can obtain the scanout
 * position, but doesn't have a frame counter.
 *
 * Returns: the current software vblank counter for @crtc, refreshed
 * under the vblank_time_lock so a concurrent vblank irq cannot be
 * counted twice or missed.
 */
u32 drm_accurate_vblank_count(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	unsigned int pipe = drm_crtc_index(crtc);
	u32 vblank;
	unsigned long flags;

	/*
	 * Interpolating the count requires a hardware vblank timestamp;
	 * warn (but do not bail) if the driver cannot provide one.
	 */
	WARN(!dev->driver->get_vblank_timestamp,
	     "This function requires support for accurate vblank timestamps.");

	/* Serialize against the vblank irq updating the counter. */
	spin_lock_irqsave(&dev->vblank_time_lock, flags);

	/* Fold in any vblanks that elapsed since the last irq, then read. */
	drm_update_vblank_count(dev, pipe, 0);
	vblank = drm_vblank_count(dev, pipe);

	spin_unlock_irqrestore(&dev->vblank_time_lock, flags);

	return vblank;
}
EXPORT_SYMBOL(drm_accurate_vblank_count);

/*
 * Disable vblank irq's on crtc, make sure that last vblank count
 * of hardware and corresponding consistent software vblank counter
+3 −0
Original line number Diff line number Diff line
@@ -59,6 +59,7 @@ i915-y += intel_audio.o \
	  intel_bios.o \
	  intel_color.o \
	  intel_display.o \
	  intel_dpio_phy.o \
	  intel_dpll_mgr.o \
	  intel_fbc.o \
	  intel_fifo_underrun.o \
@@ -81,10 +82,12 @@ i915-y += dvo_ch7017.o \
	  dvo_tfp410.o \
	  intel_crt.o \
	  intel_ddi.o \
	  intel_dp_aux_backlight.o \
	  intel_dp_link_training.o \
	  intel_dp_mst.o \
	  intel_dp.o \
	  intel_dsi.o \
	  intel_dsi_dcs_backlight.o \
	  intel_dsi_panel_vbt.o \
	  intel_dsi_pll.o \
	  intel_dvo.o \
+35 −9
Original line number Diff line number Diff line
@@ -215,7 +215,8 @@ static const struct drm_i915_cmd_descriptor hsw_render_cmds[] = {
	CMD(  MI_RS_CONTEXT,                    SMI,    F,  1,      S  ),
	CMD(  MI_LOAD_SCAN_LINES_INCL,          SMI,   !F,  0x3F,   M  ),
	CMD(  MI_LOAD_SCAN_LINES_EXCL,          SMI,   !F,  0x3F,   R  ),
	CMD(  MI_LOAD_REGISTER_REG,             SMI,   !F,  0xFF,   R  ),
	CMD(  MI_LOAD_REGISTER_REG,             SMI,   !F,  0xFF,   W,
	      .reg = { .offset = 1, .mask = 0x007FFFFC, .step = 1 }    ),
	CMD(  MI_RS_STORE_DATA_IMM,             SMI,   !F,  0xFF,   S  ),
	CMD(  MI_LOAD_URB_MEM,                  SMI,   !F,  0xFF,   S  ),
	CMD(  MI_STORE_URB_MEM,                 SMI,   !F,  0xFF,   S  ),
@@ -750,12 +751,12 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *engine)
	int cmd_table_count;
	int ret;

	if (!IS_GEN7(engine->dev))
	if (!IS_GEN7(engine->i915))
		return 0;

	switch (engine->id) {
	case RCS:
		if (IS_HASWELL(engine->dev)) {
		if (IS_HASWELL(engine->i915)) {
			cmd_tables = hsw_render_ring_cmds;
			cmd_table_count =
				ARRAY_SIZE(hsw_render_ring_cmds);
@@ -764,7 +765,7 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *engine)
			cmd_table_count = ARRAY_SIZE(gen7_render_cmds);
		}

		if (IS_HASWELL(engine->dev)) {
		if (IS_HASWELL(engine->i915)) {
			engine->reg_tables = hsw_render_reg_tables;
			engine->reg_table_count = ARRAY_SIZE(hsw_render_reg_tables);
		} else {
@@ -780,7 +781,7 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *engine)
		engine->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
		break;
	case BCS:
		if (IS_HASWELL(engine->dev)) {
		if (IS_HASWELL(engine->i915)) {
			cmd_tables = hsw_blt_ring_cmds;
			cmd_table_count = ARRAY_SIZE(hsw_blt_ring_cmds);
		} else {
@@ -788,7 +789,7 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *engine)
			cmd_table_count = ARRAY_SIZE(gen7_blt_cmds);
		}

		if (IS_HASWELL(engine->dev)) {
		if (IS_HASWELL(engine->i915)) {
			engine->reg_tables = hsw_blt_reg_tables;
			engine->reg_table_count = ARRAY_SIZE(hsw_blt_reg_tables);
		} else {
@@ -1035,7 +1036,7 @@ bool i915_needs_cmd_parser(struct intel_engine_cs *engine)
	if (!engine->needs_cmd_parser)
		return false;

	if (!USES_PPGTT(engine->dev))
	if (!USES_PPGTT(engine->i915))
		return false;

	return (i915.enable_cmd_parser == 1);
@@ -1098,6 +1099,11 @@ static bool check_cmd(const struct intel_engine_cs *engine,
					return false;
				}

				if (desc->cmd.value == MI_LOAD_REGISTER_REG) {
					DRM_DEBUG_DRIVER("CMD: Rejected LRR to OACONTROL\n");
					return false;
				}

				if (desc->cmd.value == MI_LOAD_REGISTER_IMM(1))
					*oacontrol_set = (cmd[offset + 1] != 0);
			}
@@ -1113,6 +1119,12 @@ static bool check_cmd(const struct intel_engine_cs *engine,
					return false;
				}

				if (desc->cmd.value == MI_LOAD_REGISTER_REG) {
					DRM_DEBUG_DRIVER("CMD: Rejected LRR to masked register 0x%08X\n",
							 reg_addr);
					return false;
				}

				if (desc->cmd.value == MI_LOAD_REGISTER_IMM(1) &&
				    (offset + 2 > length ||
				     (cmd[offset + 1] & reg->mask) != reg->value)) {
@@ -1275,8 +1287,21 @@ int i915_parse_cmds(struct intel_engine_cs *engine,
 *
 * Return: the current version number of the cmd parser
 */
int i915_cmd_parser_get_version(void)
int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	bool active = false;

	/* If the command parser is not enabled, report 0 - unsupported */
	for_each_engine(engine, dev_priv) {
		if (i915_needs_cmd_parser(engine)) {
			active = true;
			break;
		}
	}
	if (!active)
		return 0;

	/*
	 * Command parser version history
	 *
@@ -1288,6 +1313,7 @@ int i915_cmd_parser_get_version(void)
	 * 4. L3 atomic chicken bits of HSW_SCRATCH1 and HSW_ROW_CHICKEN3.
	 * 5. GPGPU dispatch compute indirect registers.
	 * 6. TIMESTAMP register and Haswell CS GPR registers
	 * 7. Allow MI_LOAD_REGISTER_REG between whitelisted registers.
	 */
	return 6;
	return 7;
}
+28 −33

File changed.

Preview size limit exceeded, changes collapsed.

Loading