
Commit 73e60805 authored by Linus Torvalds

Merge branch 'drm-fixes' of git://people.freedesktop.org/~airlied/linux

Pull drm fixes from Dave Airlie:
 "One regression fix, two radeon fixes (one for an oops), and an i915
  fix to unload framebuffers earlier.

  We originally were going to leave the i915 fix until -next, but grub2
  in some situations causes vesafb/efifb to be loaded now, and this
  causes big slowdowns, and I have reports in rawhide I'd like to have
  fixed."

* 'drm-fixes' of git://people.freedesktop.org/~airlied/linux:
  drm/i915: kick any firmware framebuffers before claiming the gtt
  drm: edid: Don't add inferred modes with higher resolution
  drm/radeon: fix rare segfault
  drm/radeon: fix VM page table setup on SI
parents 2fb748d2 9f846a16
+24 −3
@@ -1039,6 +1039,24 @@ mode_in_range(const struct drm_display_mode *mode, struct edid *edid,
 	return true;
 }
 
+static bool valid_inferred_mode(const struct drm_connector *connector,
+				const struct drm_display_mode *mode)
+{
+	struct drm_display_mode *m;
+	bool ok = false;
+
+	list_for_each_entry(m, &connector->probed_modes, head) {
+		if (mode->hdisplay == m->hdisplay &&
+		    mode->vdisplay == m->vdisplay &&
+		    drm_mode_vrefresh(mode) == drm_mode_vrefresh(m))
+			return false; /* duplicated */
+		if (mode->hdisplay <= m->hdisplay &&
+		    mode->vdisplay <= m->vdisplay)
+			ok = true;
+	}
+	return ok;
+}
+
 static int
 drm_dmt_modes_for_range(struct drm_connector *connector, struct edid *edid,
 			struct detailed_timing *timing)
@@ -1048,7 +1066,8 @@ drm_dmt_modes_for_range(struct drm_connector *connector, struct edid *edid,
 	struct drm_device *dev = connector->dev;
 
 	for (i = 0; i < drm_num_dmt_modes; i++) {
-		if (mode_in_range(drm_dmt_modes + i, edid, timing)) {
+		if (mode_in_range(drm_dmt_modes + i, edid, timing) &&
+		    valid_inferred_mode(connector, drm_dmt_modes + i)) {
 			newmode = drm_mode_duplicate(dev, &drm_dmt_modes[i]);
 			if (newmode) {
 				drm_mode_probed_add(connector, newmode);
@@ -1088,7 +1107,8 @@ drm_gtf_modes_for_range(struct drm_connector *connector, struct edid *edid,
 			return modes;
 
 		fixup_mode_1366x768(newmode);
-		if (!mode_in_range(newmode, edid, timing)) {
+		if (!mode_in_range(newmode, edid, timing) ||
+		    !valid_inferred_mode(connector, newmode)) {
 			drm_mode_destroy(dev, newmode);
 			continue;
 		}
@@ -1116,7 +1136,8 @@ drm_cvt_modes_for_range(struct drm_connector *connector, struct edid *edid,
 			return modes;
 
 		fixup_mode_1366x768(newmode);
-		if (!mode_in_range(newmode, edid, timing)) {
+		if (!mode_in_range(newmode, edid, timing) ||
+		    !valid_inferred_mode(connector, newmode)) {
 			drm_mode_destroy(dev, newmode);
 			continue;
 		}
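
For reference, the rule the new valid_inferred_mode() enforces can be tried outside the kernel. The sketch below is a standalone illustration with plain structs (not the DRM types): an inferred mode is dropped if it duplicates a probed mode, and is kept only if at least one probed mode is at least as large in both dimensions.

/* Standalone illustration of the valid_inferred_mode() rule (not kernel
 * code): keep an inferred mode only if it fits within some probed mode
 * and does not duplicate one. */
#include <stdbool.h>
#include <stdio.h>

struct mode { int hdisplay, vdisplay, vrefresh; };

static bool valid_inferred(const struct mode *probed, int count,
			   const struct mode *cand)
{
	bool ok = false;
	int i;

	for (i = 0; i < count; i++) {
		if (cand->hdisplay == probed[i].hdisplay &&
		    cand->vdisplay == probed[i].vdisplay &&
		    cand->vrefresh == probed[i].vrefresh)
			return false;	/* duplicate of a probed mode */
		if (cand->hdisplay <= probed[i].hdisplay &&
		    cand->vdisplay <= probed[i].vdisplay)
			ok = true;	/* fits inside a probed mode */
	}
	return ok;
}

int main(void)
{
	struct mode probed[] = { { 1920, 1080, 60 }, { 1280, 1024, 60 } };
	struct mode larger   = { 2560, 1440, 60 };	/* rejected: exceeds every probed mode */
	struct mode smaller  = { 1024,  768, 60 };	/* accepted: fits under 1920x1080 */

	printf("%d %d\n", valid_inferred(probed, 2, &larger),
			  valid_inferred(probed, 2, &smaller));	/* prints "0 1" */
	return 0;
}
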
+30 −7
@@ -1401,6 +1401,27 @@ i915_mtrr_setup(struct drm_i915_private *dev_priv, unsigned long base,
 	}
 }
 
+static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
+{
+	struct apertures_struct *ap;
+	struct pci_dev *pdev = dev_priv->dev->pdev;
+	bool primary;
+
+	ap = alloc_apertures(1);
+	if (!ap)
+		return;
+
+	ap->ranges[0].base = dev_priv->dev->agp->base;
+	ap->ranges[0].size =
+		dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
+	primary =
+		pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
+
+	remove_conflicting_framebuffers(ap, "inteldrmfb", primary);
+
+	kfree(ap);
+}
+
 /**
  * i915_driver_load - setup chip and create an initial config
  * @dev: DRM device
@@ -1446,6 +1467,15 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 		goto free_priv;
 	}
 
+	dev_priv->mm.gtt = intel_gtt_get();
+	if (!dev_priv->mm.gtt) {
+		DRM_ERROR("Failed to initialize GTT\n");
+		ret = -ENODEV;
+		goto put_bridge;
+	}
+
+	i915_kick_out_firmware_fb(dev_priv);
+
 	pci_set_master(dev->pdev);
 
 	/* overlay on gen2 is broken and can't address above 1G */
@@ -1471,13 +1501,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 		goto put_bridge;
 	}
 
-	dev_priv->mm.gtt = intel_gtt_get();
-	if (!dev_priv->mm.gtt) {
-		DRM_ERROR("Failed to initialize GTT\n");
-		ret = -ENODEV;
-		goto out_rmmap;
-	}
-
 	aperture_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
 
 	dev_priv->mm.gtt_mapping =
+11 −2
@@ -289,8 +289,9 @@ int radeon_vm_manager_init(struct radeon_device *rdev)
 	rdev->vm_manager.enabled = false;
 
 	/* mark first vm as always in use, it's the system one */
+	/* allocate enough for 2 full VM pts */
 	r = radeon_sa_bo_manager_init(rdev, &rdev->vm_manager.sa_manager,
-				      rdev->vm_manager.max_pfn * 8,
+				      rdev->vm_manager.max_pfn * 8 * 2,
 				      RADEON_GEM_DOMAIN_VRAM);
 	if (r) {
 		dev_err(rdev->dev, "failed to allocate vm bo (%dKB)\n",
@@ -633,6 +634,14 @@ int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
 	mutex_init(&vm->mutex);
 	INIT_LIST_HEAD(&vm->list);
 	INIT_LIST_HEAD(&vm->va);
+	/* SI requires equal sized PTs for all VMs, so always set
+	 * last_pfn to max_pfn.  cayman allows variable sized
+	 * pts so we can grow then as needed.  Once we switch
+	 * to two level pts we can unify this again.
+	 */
+	if (rdev->family >= CHIP_TAHITI)
+		vm->last_pfn = rdev->vm_manager.max_pfn;
+	else
+		vm->last_pfn = 0;
 	/* map the ib pool buffer at 0 in virtual address space, set
 	 * read only
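
A rough sizing illustration of why the SA manager reservation above is doubled: with SI forcing last_pfn to max_pfn, every VM needs a full-size page table, and the manager must hold two of them. The numbers below are assumptions for illustration only (4 KiB GPU pages, 8-byte page table entries as implied by the "* 8" factor, a 1 GiB example VM space), not values taken from the driver.

/* Back-of-the-envelope page table sizing (illustrative numbers only,
 * assuming 4 KiB GPU pages and 8-byte PTEs as implied by the "* 8"). */
#include <stdio.h>

int main(void)
{
	unsigned long vm_bytes    = 1UL << 30;		   /* e.g. a 1 GiB VM address space */
	unsigned long page_size   = 4096;
	unsigned long max_pfn     = vm_bytes / page_size;  /* 262144 pages */
	unsigned long pt_bytes    = max_pfn * 8;	   /* one full page table: 2 MiB */
	unsigned long reservation = pt_bytes * 2;	   /* doubled by the fix: 4 MiB */

	printf("max_pfn=%lu pt=%lu KiB reservation=%lu KiB\n",
	       max_pfn, pt_bytes >> 10, reservation >> 10);
	return 0;
}
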
+6 −4
@@ -292,6 +292,7 @@ int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
 int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
 			  struct drm_file *filp)
 {
+	struct radeon_device *rdev = dev->dev_private;
 	struct drm_radeon_gem_busy *args = data;
 	struct drm_gem_object *gobj;
 	struct radeon_bo *robj;
@@ -317,13 +318,14 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
 		break;
 	}
 	drm_gem_object_unreference_unlocked(gobj);
-	r = radeon_gem_handle_lockup(robj->rdev, r);
+	r = radeon_gem_handle_lockup(rdev, r);
 	return r;
 }
 
 int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
 			      struct drm_file *filp)
 {
+	struct radeon_device *rdev = dev->dev_private;
 	struct drm_radeon_gem_wait_idle *args = data;
 	struct drm_gem_object *gobj;
 	struct radeon_bo *robj;
@@ -336,10 +338,10 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
 	robj = gem_to_radeon_bo(gobj);
 	r = radeon_bo_wait(robj, NULL, false);
 	/* callback hw specific functions if any */
-	if (robj->rdev->asic->ioctl_wait_idle)
-		robj->rdev->asic->ioctl_wait_idle(robj->rdev, robj);
+	if (rdev->asic->ioctl_wait_idle)
+		robj->rdev->asic->ioctl_wait_idle(rdev, robj);
 	drm_gem_object_unreference_unlocked(gobj);
-	r = radeon_gem_handle_lockup(robj->rdev, r);
+	r = radeon_gem_handle_lockup(rdev, r);
 	return r;
 }
 
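The "rare segfault" fixed above is a use-after-free pattern: drm_gem_object_unreference_unlocked() may drop the last reference and free the buffer object, so dereferencing robj->rdev afterwards can touch freed memory. Caching rdev from dev->dev_private up front sidesteps that. Below is a standalone sketch of the bug class with hypothetical names (not the DRM API).

/* Standalone sketch (hypothetical types, not DRM code): anything needed
 * after the last reference may be dropped must be read out first, or
 * taken from a longer-lived owner. */
#include <stdio.h>
#include <stdlib.h>

struct device { const char *name; };
struct object { struct device *dev; int refcount; };

static void object_unreference(struct object *obj)
{
	if (--obj->refcount == 0)
		free(obj);		/* obj may be gone after this call */
}

static void report_fixed(struct device *owner, struct object *obj)
{
	struct device *dev = owner;	/* taken from the long-lived owner */

	object_unreference(obj);
	/* Using obj->dev here would be the bug; dev stays valid instead. */
	printf("done on %s\n", dev->name);
}

int main(void)
{
	struct device dev = { "dev0" };
	struct object *obj = malloc(sizeof(*obj));

	if (!obj)
		return 1;
	obj->dev = &dev;
	obj->refcount = 1;
	report_fixed(&dev, obj);	/* obj is freed inside */
	return 0;
}
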
+2 −2
@@ -2365,12 +2365,12 @@ int si_pcie_gart_enable(struct radeon_device *rdev)
 	WREG32(0x15DC, 0);
 
 	/* empty context1-15 */
-	/* FIXME start with 1G, once using 2 level pt switch to full
+	/* FIXME start with 4G, once using 2 level pt switch to full
 	 * vm size space
 	 */
 	/* set vm size, must be a multiple of 4 */
 	WREG32(VM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
-	WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, (1 << 30) / RADEON_GPU_PAGE_SIZE);
+	WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn);
 	for (i = 1; i < 16; i++) {
 		if (i < 8)
 			WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),