Commit dc87eaf1 authored by Dave Airlie

Merge branch 'drm-nouveau-next' of git://anongit.freedesktop.org/git/nouveau/linux-2.6 into drm-next

* 'drm-nouveau-next' of git://anongit.freedesktop.org/git/nouveau/linux-2.6: (50 commits)
  drm/nv50: flesh out ZCULL init and match nvidia on later chipsets
  drm/nv50: support for compression
  drm/nv50-nvc0: delay GART binding until move_notify time
  drm/nouveau: rename nouveau_vram to nouveau_mem
  drm/nvc0: allow creation of buffers with any non-compressed memtype
  drm/nv50-nvc0: unmap buffers from the vm when they're evicted
  drm/nv50-nvc0: move vm bind/unbind to move_notify hook
  drm/nv50-nvc0: restrict memtype to those specified at creation time
  drm/nouveau: pass domain rather than ttm flags to gem_new()
  drm/nv50: simplify bo moves now that they're all through the vm
  drm/nouveau: remove no_vm/mappable flags from nouveau_bo
  drm/nouveau: Fix pageflip event
  drm/nouveau/vbios: parse more gpio tag bits from connector table
  drm/nouveau: decode PFIFO DMA_PUSHER error codes
  drm/nv50: fix typos in CCACHE error reporting
  drm/nvc0: support for sw methods + enable page flipping
  drm/nv50: enable page flipping
  drm/nv50-nvc0: activate/update ds channel's framebuffer on modesets
  drm/nv50-nvc0: initialise display sync channels
  drm/nv50-nvc0: precalculate some fb state when creating them
  ...
parents a2c06ee2 562af10c
drivers/gpu/drm/nouveau/nouveau_bios.c +17 −26
@@ -282,7 +282,7 @@ static void still_alive(void)
{
#if 0
	sync();
-	msleep(2);
+	mdelay(2);
#endif
}
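
A note on the msleep() -> mdelay() conversions in this file: bios->lock becomes a spinlock later in this diff (see the nouveau_bios_run_init_table and struct nvbios hunks), and code holding a spinlock must not sleep, which msleep() does; mdelay() busy-waits instead. A minimal sketch of the constraint, using a hypothetical demo_lock rather than anything from this patch:

	#include <linux/delay.h>
	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(demo_lock);	/* hypothetical lock, illustration only */

	static void demo_delay_in_atomic(void)
	{
		spin_lock_bh(&demo_lock);
		/* msleep(20); */	/* would schedule while atomic: a bug */
		mdelay(20);		/* busy-waits for 20ms: legal, if wasteful */
		spin_unlock_bh(&demo_lock);
	}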

@@ -1904,7 +1904,7 @@ init_condition_time(struct nvbios *bios, uint16_t offset,
			BIOSLOG(bios, "0x%04X: "
				"Condition not met, sleeping for 20ms\n",
								offset);
-			msleep(20);
+			mdelay(20);
		}
	}

@@ -1938,7 +1938,7 @@ init_ltime(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
	BIOSLOG(bios, "0x%04X: Sleeping for 0x%04X milliseconds\n",
		offset, time);

-	msleep(time);
+	mdelay(time);

	return 3;
}
@@ -2962,7 +2962,7 @@ init_time(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
	if (time < 1000)
		udelay(time);
	else
-		msleep((time + 900) / 1000);
+		mdelay((time + 900) / 1000);

	return 3;
}
@@ -3856,7 +3856,7 @@ static int call_lvds_manufacturer_script(struct drm_device *dev, struct dcb_entr

	if (script == LVDS_PANEL_OFF) {
		/* off-on delay in ms */
-		msleep(ROM16(bios->data[bios->fp.xlated_entry + 7]));
+		mdelay(ROM16(bios->data[bios->fp.xlated_entry + 7]));
	}
#ifdef __powerpc__
	/* Powerbook specific quirks */
@@ -5950,6 +5950,11 @@ apply_dcb_connector_quirks(struct nvbios *bios, int idx)
	}
}

+static const u8 hpd_gpio[16] = {
+	0xff, 0x07, 0x08, 0xff, 0xff, 0x51, 0x52, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0x5e, 0x5f, 0x60,
+};
+
static void
parse_dcb_connector_table(struct nvbios *bios)
{
@@ -5986,23 +5991,9 @@ parse_dcb_connector_table(struct nvbios *bios)

		cte->type  = (cte->entry & 0x000000ff) >> 0;
		cte->index2 = (cte->entry & 0x00000f00) >> 8;
-		switch (cte->entry & 0x00033000) {
-		case 0x00001000:
-			cte->gpio_tag = 0x07;
-			break;
-		case 0x00002000:
-			cte->gpio_tag = 0x08;
-			break;
-		case 0x00010000:
-			cte->gpio_tag = 0x51;
-			break;
-		case 0x00020000:
-			cte->gpio_tag = 0x52;
-			break;
-		default:
-			cte->gpio_tag = 0xff;
-			break;
-		}
+
+		cte->gpio_tag = ffs((cte->entry & 0x07033000) >> 12);
+		cte->gpio_tag = hpd_gpio[cte->gpio_tag];

		if (cte->type == 0xff)
			continue;
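
The table lookup above is equivalent to the deleted switch for the four tags it handled, and additionally decodes the tag bits under 0x07000000, per the "parse more gpio tag bits from connector table" commit in this merge. A quick userspace re-check of the mapping (illustrative only; the kernel uses its own ffs()):

	#include <stdio.h>
	#include <strings.h>	/* ffs() */

	static const unsigned char hpd_gpio[16] = {
		0xff, 0x07, 0x08, 0xff, 0xff, 0x51, 0x52, 0xff,
		0xff, 0xff, 0xff, 0xff, 0xff, 0x5e, 0x5f, 0x60,
	};

	int main(void)
	{
		/* the old switch cases plus one of the newly parsed high bits */
		unsigned int entries[] = { 0x00001000, 0x00002000, 0x00010000,
					   0x00020000, 0x04000000, 0x00000000 };
		for (unsigned int i = 0; i < sizeof(entries) / sizeof(entries[0]); i++) {
			int idx = ffs((entries[i] & 0x07033000) >> 12);
			printf("entry 0x%08x -> gpio_tag 0x%02x\n",
			       entries[i], hpd_gpio[idx]);
		}
		return 0;	/* prints 0x07, 0x08, 0x51, 0x52, 0x60, 0xff */
	}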
@@ -6228,7 +6219,7 @@ parse_dcb15_entry(struct drm_device *dev, struct dcb_table *dcb,
		entry->tvconf.has_component_output = false;
		break;
	case OUTPUT_LVDS:
-		if ((conn & 0x00003f00) != 0x10)
+		if ((conn & 0x00003f00) >> 8 != 0x10)
			entry->lvdsconf.use_straps_for_mode = true;
		entry->lvdsconf.use_power_scripts = true;
		break;
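
The LVDS hunk above fixes an always-true comparison: conn & 0x00003f00 selects bits 8..13, so the masked value can never equal 0x10 until it is shifted down. A tiny standalone check of both forms:

	#include <assert.h>

	int main(void)
	{
		unsigned int conn = 0x00001000;	/* LVDS config field == 0x10 */
		assert((conn & 0x00003f00) != 0x10);		/* old test: true even here */
		assert(((conn & 0x00003f00) >> 8) == 0x10);	/* new test: matches */
		return 0;
	}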
@@ -6702,11 +6693,11 @@ nouveau_bios_run_init_table(struct drm_device *dev, uint16_t table,
	struct nvbios *bios = &dev_priv->vbios;
	struct init_exec iexec = { true, false };

-	mutex_lock(&bios->lock);
+	spin_lock_bh(&bios->lock);
	bios->display.output = dcbent;
	parse_init_table(bios, table, &iexec);
	bios->display.output = NULL;
-	mutex_unlock(&bios->lock);
+	spin_unlock_bh(&bios->lock);
}

static bool NVInitVBIOS(struct drm_device *dev)
@@ -6715,7 +6706,7 @@ static bool NVInitVBIOS(struct drm_device *dev)
	struct nvbios *bios = &dev_priv->vbios;

	memset(bios, 0, sizeof(struct nvbios));
-	mutex_init(&bios->lock);
+	spin_lock_init(&bios->lock);
	bios->dev = dev;

	if (!NVShadowVBIOS(dev, bios->data))
drivers/gpu/drm/nouveau/nouveau_bios.h +1 −1
@@ -251,7 +251,7 @@ struct nvbios {
	uint8_t digital_min_front_porch;
	bool fp_no_ddc;

-	struct mutex lock;
+	spinlock_t lock;

	uint8_t data[NV_PROM_SIZE];
	unsigned int length;
drivers/gpu/drm/nouveau/nouveau_bo.c +154 −114
@@ -54,8 +54,8 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
}

static void
-nouveau_bo_fixup_align(struct nouveau_bo *nvbo, int *align, int *size,
-		       int *page_shift)
+nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
+		       int *align, int *size, int *page_shift)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);

@@ -80,7 +80,7 @@ nouveau_bo_fixup_align(struct nouveau_bo *nvbo, int *align, int *size,
		}
	} else {
		if (likely(dev_priv->chan_vm)) {
-			if (*size > 256 * 1024)
+			if (!(flags & TTM_PL_FLAG_TT) &&  *size > 256 * 1024)
				*page_shift = dev_priv->chan_vm->lpg_shift;
			else
				*page_shift = dev_priv->chan_vm->spg_shift;
@@ -98,8 +98,7 @@ nouveau_bo_fixup_align(struct nouveau_bo *nvbo, int *align, int *size,
int
nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
	       int size, int align, uint32_t flags, uint32_t tile_mode,
-	       uint32_t tile_flags, bool no_vm, bool mappable,
-	       struct nouveau_bo **pnvbo)
+	       uint32_t tile_flags, struct nouveau_bo **pnvbo)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_bo *nvbo;
@@ -110,16 +109,14 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
		return -ENOMEM;
	INIT_LIST_HEAD(&nvbo->head);
	INIT_LIST_HEAD(&nvbo->entry);
-	nvbo->mappable = mappable;
-	nvbo->no_vm = no_vm;
	nvbo->tile_mode = tile_mode;
	nvbo->tile_flags = tile_flags;
	nvbo->bo.bdev = &dev_priv->ttm.bdev;

-	nouveau_bo_fixup_align(nvbo, &align, &size, &page_shift);
+	nouveau_bo_fixup_align(nvbo, flags, &align, &size, &page_shift);
	align >>= PAGE_SHIFT;

-	if (!nvbo->no_vm && dev_priv->chan_vm) {
+	if (dev_priv->chan_vm) {
		ret = nouveau_vm_get(dev_priv->chan_vm, size, page_shift,
				     NV_MEM_ACCESS_RW, &nvbo->vma);
		if (ret) {
@@ -128,6 +125,7 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
		}
	}

+	nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
	nouveau_bo_placement_set(nvbo, flags, 0);

	nvbo->channel = chan;
@@ -140,11 +138,8 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
	}
	nvbo->channel = NULL;

-	if (nvbo->vma.node) {
-		if (nvbo->bo.mem.mem_type == TTM_PL_VRAM)
-			nvbo->bo.offset = nvbo->vma.offset;
-	}
-
+	if (nvbo->vma.node)
+		nvbo->bo.offset = nvbo->vma.offset;
	*pnvbo = nvbo;
	return 0;
}
@@ -166,17 +161,17 @@ static void
set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
+	int vram_pages = dev_priv->vram_size >> PAGE_SHIFT;

	if (dev_priv->card_type == NV_10 &&
-	    nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM)) {
+	    nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) &&
+	    nvbo->bo.mem.num_pages < vram_pages / 2) {
		/*
		 * Make sure that the color and depth buffers are handled
		 * by independent memory controller units. Up to a 9x
		 * speed up when alpha-blending and depth-test are enabled
		 * at the same time.
		 */
-		int vram_pages = dev_priv->vram_size >> PAGE_SHIFT;
-
		if (nvbo->tile_flags & NOUVEAU_GEM_TILE_ZETA) {
			nvbo->placement.fpfn = vram_pages / 2;
			nvbo->placement.lpfn = ~0;
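
A worked example of the placement split above, with hypothetical numbers (256 MiB of VRAM, 4 KiB pages): vram_pages is 65536, a zeta buffer is pinned to the upper half [32768, ~0] and colour buffers to the lower half, and the new num_pages < vram_pages / 2 guard skips the split whenever the buffer could not fit in one half anyway:

	#include <stdio.h>

	int main(void)
	{
		unsigned long vram_size = 256UL << 20;		/* hypothetical 256 MiB */
		unsigned long vram_pages = vram_size >> 12;	/* 4 KiB pages: 65536 */
		unsigned long num_pages = 2048;			/* an 8 MiB buffer */

		if (num_pages < vram_pages / 2)			/* the new guard */
			printf("zeta: [%lu, ~0], colour: [0, %lu]\n",
			       vram_pages / 2, vram_pages / 2);
		else
			printf("no split: buffer larger than half of VRAM\n");
		return 0;
	}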
@@ -314,11 +309,8 @@ nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
	if (ret)
		return ret;

-	if (nvbo->vma.node) {
-		if (nvbo->bo.mem.mem_type == TTM_PL_VRAM)
-			nvbo->bo.offset = nvbo->vma.offset;
-	}
+	if (nvbo->vma.node)
+		nvbo->bo.offset = nvbo->vma.offset;

	return 0;
}

@@ -381,7 +373,8 @@ nouveau_bo_create_ttm_backend_entry(struct ttm_bo_device *bdev)
	case NOUVEAU_GART_AGP:
		return ttm_agp_backend_init(bdev, dev->agp->bridge);
#endif
-	case NOUVEAU_GART_SGDMA:
+	case NOUVEAU_GART_PDMA:
+	case NOUVEAU_GART_HW:
		return nouveau_sgdma_init_ttm(dev);
	default:
		NV_ERROR(dev, "Unknown GART type %d\n",
@@ -427,6 +420,9 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
		man->default_caching = TTM_PL_FLAG_WC;
		break;
	case TTM_PL_TT:
-		man->func = &ttm_bo_manager_func;
+		if (dev_priv->card_type >= NV_50)
+			man->func = &nouveau_gart_manager;
+		else
+			man->func = &ttm_bo_manager_func;
		switch (dev_priv->gart_info.type) {
		case NOUVEAU_GART_AGP:
@@ -435,7 +431,8 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
				TTM_PL_FLAG_WC;
			man->default_caching = TTM_PL_FLAG_WC;
			break;
-		case NOUVEAU_GART_SGDMA:
+		case NOUVEAU_GART_PDMA:
+		case NOUVEAU_GART_HW:
			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
				     TTM_MEMTYPE_FLAG_CMA;
			man->available_caching = TTM_PL_MASK_CACHING;
@@ -497,45 +494,22 @@ nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
	return ret;
}

-static inline uint32_t
-nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
-		      struct nouveau_channel *chan, struct ttm_mem_reg *mem)
-{
-	struct nouveau_bo *nvbo = nouveau_bo(bo);
-
-	if (nvbo->no_vm) {
-		if (mem->mem_type == TTM_PL_TT)
-			return NvDmaGART;
-		return NvDmaVRAM;
-	}
-
-	if (mem->mem_type == TTM_PL_TT)
-		return chan->gart_handle;
-	return chan->vram_handle;
-}
-
static int
nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
-	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
+	struct nouveau_mem *old_node = old_mem->mm_node;
+	struct nouveau_mem *new_node = new_mem->mm_node;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
-	u64 src_offset = old_mem->start << PAGE_SHIFT;
-	u64 dst_offset = new_mem->start << PAGE_SHIFT;
	u32 page_count = new_mem->num_pages;
+	u64 src_offset, dst_offset;
	int ret;

-	if (!nvbo->no_vm) {
-		if (old_mem->mem_type == TTM_PL_VRAM)
-			src_offset  = nvbo->vma.offset;
-		else
-			src_offset += dev_priv->gart_info.aper_base;
-
-		if (new_mem->mem_type == TTM_PL_VRAM)
-			dst_offset  = nvbo->vma.offset;
-		else
-			dst_offset += dev_priv->gart_info.aper_base;
-	}
+	src_offset = old_node->tmp_vma.offset;
+	if (new_node->tmp_vma.node)
+		dst_offset = new_node->tmp_vma.offset;
+	else
+		dst_offset = nvbo->vma.offset;

	page_count = new_mem->num_pages;
	while (page_count) {
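
Both copy paths in this file now take their source and destination offsets from VM mappings (the tmp_vma set up by nouveau_bo_move_m2mf further down, or the buffer's own permanent vma) instead of raw VRAM/aperture addresses. A standalone sketch of just the offset selection, with stand-in struct definitions that are not the kernel's:

	#include <stdint.h>
	#include <stdio.h>

	/* minimal stand-ins for the kernel structures, illustration only */
	struct demo_vma { void *node; uint64_t offset; };
	struct demo_mem { struct demo_vma tmp_vma; };
	struct demo_bo  { struct demo_vma vma; };

	/* mirrors the added lines: prefer a temporary mapping created for
	 * this move, otherwise fall back to the buffer's permanent vma */
	static uint64_t pick_dst_offset(struct demo_mem *node, struct demo_bo *nvbo)
	{
		if (node->tmp_vma.node)
			return node->tmp_vma.offset;
		return nvbo->vma.offset;
	}

	int main(void)
	{
		struct demo_mem dst = { { NULL, 0 } };
		struct demo_bo nvbo = { { NULL, 0x100000 } };
		printf("dst_offset = 0x%llx\n",
		       (unsigned long long)pick_dst_offset(&dst, &nvbo));
		return 0;
	}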
@@ -570,33 +544,18 @@ static int
nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
-	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
+	struct nouveau_mem *old_node = old_mem->mm_node;
+	struct nouveau_mem *new_node = new_mem->mm_node;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	u64 length = (new_mem->num_pages << PAGE_SHIFT);
	u64 src_offset, dst_offset;
	int ret;

-	src_offset = old_mem->start << PAGE_SHIFT;
-	dst_offset = new_mem->start << PAGE_SHIFT;
-	if (!nvbo->no_vm) {
-		if (old_mem->mem_type == TTM_PL_VRAM)
-			src_offset  = nvbo->vma.offset;
-		else
-			src_offset += dev_priv->gart_info.aper_base;
-
-		if (new_mem->mem_type == TTM_PL_VRAM)
-			dst_offset  = nvbo->vma.offset;
-		else
-			dst_offset += dev_priv->gart_info.aper_base;
-	}
-
-	ret = RING_SPACE(chan, 3);
-	if (ret)
-		return ret;
-
-	BEGIN_RING(chan, NvSubM2MF, 0x0184, 2);
-	OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem));
-	OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem));
+	src_offset = old_node->tmp_vma.offset;
+	if (new_node->tmp_vma.node)
+		dst_offset = new_node->tmp_vma.offset;
+	else
+		dst_offset = nvbo->vma.offset;

	while (length) {
		u32 amount, stride, height;
@@ -677,6 +636,15 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
	return 0;
}

+static inline uint32_t
+nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
+		      struct nouveau_channel *chan, struct ttm_mem_reg *mem)
+{
+	if (mem->mem_type == TTM_PL_TT)
+		return chan->gart_handle;
+	return chan->vram_handle;
+}
+
static int
nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
@@ -730,15 +698,43 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
+	struct ttm_mem_reg *old_mem = &bo->mem;
	struct nouveau_channel *chan;
	int ret;

	chan = nvbo->channel;
-	if (!chan || nvbo->no_vm) {
+	if (!chan) {
		chan = dev_priv->channel;
		mutex_lock_nested(&chan->mutex, NOUVEAU_KCHANNEL_MUTEX);
	}

+	/* create temporary vma for old memory, this will get cleaned
+	 * up after ttm destroys the ttm_mem_reg
+	 */
+	if (dev_priv->card_type >= NV_50) {
+		struct nouveau_mem *node = old_mem->mm_node;
+		if (!node->tmp_vma.node) {
+			u32 page_shift = nvbo->vma.node->type;
+			if (old_mem->mem_type == TTM_PL_TT)
+				page_shift = nvbo->vma.vm->spg_shift;
+
+			ret = nouveau_vm_get(chan->vm,
+					     old_mem->num_pages << PAGE_SHIFT,
+					     page_shift, NV_MEM_ACCESS_RO,
+					     &node->tmp_vma);
+			if (ret)
+				goto out;
+		}
+
+		if (old_mem->mem_type == TTM_PL_VRAM)
+			nouveau_vm_map(&node->tmp_vma, node);
+		else {
+			nouveau_vm_map_sg(&node->tmp_vma, 0,
+					  old_mem->num_pages << PAGE_SHIFT,
+					  node, node->pages);
+		}
+	}
+
	if (dev_priv->card_type < NV_50)
		ret = nv04_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
	else
@@ -752,6 +748,7 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
						    no_wait_gpu, new_mem);
	}

+out:
	if (chan == dev_priv->channel)
		mutex_unlock(&chan->mutex);
	return ret;
@@ -762,6 +759,7 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
		      bool no_wait_reserve, bool no_wait_gpu,
		      struct ttm_mem_reg *new_mem)
{
+	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
	struct ttm_placement placement;
	struct ttm_mem_reg tmp_mem;
@@ -781,11 +779,27 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
	if (ret)
		goto out;

+	if (dev_priv->card_type >= NV_50) {
+		struct nouveau_bo *nvbo = nouveau_bo(bo);
+		struct nouveau_mem *node = tmp_mem.mm_node;
+		struct nouveau_vma *vma = &nvbo->vma;
+		if (vma->node->type != vma->vm->spg_shift)
+			vma = &node->tmp_vma;
+		nouveau_vm_map_sg(vma, 0, tmp_mem.num_pages << PAGE_SHIFT,
+				  node, node->pages);
+	}
+
	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, &tmp_mem);
+
+	if (dev_priv->card_type >= NV_50) {
+		struct nouveau_bo *nvbo = nouveau_bo(bo);
+		nouveau_vm_unmap(&nvbo->vma);
+	}
+
	if (ret)
		goto out;

-	ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
+	ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, new_mem);
out:
	ttm_bo_mem_put(bo, &tmp_mem);
	return ret;
@@ -811,11 +825,11 @@ nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
	if (ret)
		return ret;

-	ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, &tmp_mem);
+	ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem);
	if (ret)
		goto out;

-	ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
+	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, new_mem);
	if (ret)
		goto out;

@@ -824,6 +838,36 @@ nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
	return ret;
}

+static void
+nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
+{
+	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
+	struct nouveau_mem *node = new_mem->mm_node;
+	struct nouveau_bo *nvbo = nouveau_bo(bo);
+	struct nouveau_vma *vma = &nvbo->vma;
+	struct nouveau_vm *vm = vma->vm;
+
+	if (dev_priv->card_type < NV_50)
+		return;
+
+	switch (new_mem->mem_type) {
+	case TTM_PL_VRAM:
+		nouveau_vm_map(vma, node);
+		break;
+	case TTM_PL_TT:
+		if (vma->node->type != vm->spg_shift) {
+			nouveau_vm_unmap(vma);
+			vma = &node->tmp_vma;
+		}
+		nouveau_vm_map_sg(vma, 0, new_mem->num_pages << PAGE_SHIFT,
+				  node, node->pages);
+		break;
+	default:
+		nouveau_vm_unmap(&nvbo->vma);
+		break;
+	}
+}
+
static int
nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
		   struct nouveau_tile_reg **new_tile)
@@ -831,19 +875,13 @@ nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct drm_device *dev = dev_priv->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
-	uint64_t offset;
+	u64 offset = new_mem->start << PAGE_SHIFT;

-	if (nvbo->no_vm || new_mem->mem_type != TTM_PL_VRAM) {
-		/* Nothing to do. */
-		*new_tile = NULL;
+	*new_tile = NULL;
+	if (new_mem->mem_type != TTM_PL_VRAM)
		return 0;
-	}

-	offset = new_mem->start << PAGE_SHIFT;
-
-	if (dev_priv->chan_vm) {
-		nouveau_vm_map(&nvbo->vma, new_mem->mm_node);
-	} else if (dev_priv->card_type >= NV_10) {
+	if (dev_priv->card_type >= NV_10) {
		*new_tile = nv10_mem_set_tiling(dev, offset, new_mem->size,
						nvbo->tile_mode,
						nvbo->tile_flags);
@@ -860,12 +898,9 @@ nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct drm_device *dev = dev_priv->dev;

-	if (dev_priv->card_type >= NV_10 &&
-	    dev_priv->card_type < NV_50) {
-		nv10_mem_put_tile_region(dev, *old_tile, bo->sync_obj);
-		*old_tile = new_tile;
-	}
+	nv10_mem_put_tile_region(dev, *old_tile, bo->sync_obj);
+	*old_tile = new_tile;
}

static int
nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
@@ -878,9 +913,11 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
	struct nouveau_tile_reg *new_tile = NULL;
	int ret = 0;

-	ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);
-	if (ret)
-		return ret;
+	if (dev_priv->card_type < NV_50) {
+		ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);
+		if (ret)
+			return ret;
+	}

	/* Fake bo copy. */
	if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
@@ -911,10 +948,12 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
	ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);

out:
-	if (ret)
-		nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
-	else
-		nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);
+	if (dev_priv->card_type < NV_50) {
+		if (ret)
+			nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
+		else
+			nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);
+	}

	return ret;
}
@@ -955,7 +994,7 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
		break;
	case TTM_PL_VRAM:
	{
-		struct nouveau_vram *vram = mem->mm_node;
+		struct nouveau_mem *node = mem->mm_node;
		u8 page_shift;

		if (!dev_priv->bar1_vm) {
@@ -966,23 +1005,23 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
		}

		if (dev_priv->card_type == NV_C0)
-			page_shift = vram->page_shift;
+			page_shift = node->page_shift;
		else
			page_shift = 12;

		ret = nouveau_vm_get(dev_priv->bar1_vm, mem->bus.size,
				     page_shift, NV_MEM_ACCESS_RW,
-				     &vram->bar_vma);
+				     &node->bar_vma);
		if (ret)
			return ret;

-		nouveau_vm_map(&vram->bar_vma, vram);
+		nouveau_vm_map(&node->bar_vma, node);
		if (ret) {
-			nouveau_vm_put(&vram->bar_vma);
+			nouveau_vm_put(&node->bar_vma);
			return ret;
		}

-		mem->bus.offset = vram->bar_vma.offset;
+		mem->bus.offset = node->bar_vma.offset;
		if (dev_priv->card_type == NV_50) /*XXX*/
			mem->bus.offset -= 0x0020000000ULL;
		mem->bus.base = pci_resource_start(dev->pdev, 1);
@@ -999,16 +1038,16 @@ static void
nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
-	struct nouveau_vram *vram = mem->mm_node;
+	struct nouveau_mem *node = mem->mm_node;

	if (!dev_priv->bar1_vm || mem->mem_type != TTM_PL_VRAM)
		return;

-	if (!vram->bar_vma.node)
+	if (!node->bar_vma.node)
		return;

-	nouveau_vm_unmap(&vram->bar_vma);
-	nouveau_vm_put(&vram->bar_vma);
+	nouveau_vm_unmap(&node->bar_vma);
+	nouveau_vm_put(&node->bar_vma);
}

static int
@@ -1058,6 +1097,7 @@ struct ttm_bo_driver nouveau_bo_driver = {
	.invalidate_caches = nouveau_bo_invalidate_caches,
	.init_mem_type = nouveau_bo_init_mem_type,
	.evict_flags = nouveau_bo_evict_flags,
+	.move_notify = nouveau_bo_move_ntfy,
	.move = nouveau_bo_move,
	.verify_access = nouveau_bo_verify_access,
	.sync_obj_signaled = __nouveau_fence_signalled,
drivers/gpu/drm/nouveau/nouveau_channel.c +2 −3
@@ -35,7 +35,7 @@ nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan)
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_bo *pb = chan->pushbuf_bo;
	struct nouveau_gpuobj *pushbuf = NULL;
-	int ret;
+	int ret = 0;

	if (dev_priv->card_type >= NV_50) {
		if (dev_priv->card_type < NV_C0) {
@@ -90,8 +90,7 @@ nouveau_channel_user_pushbuf_alloc(struct drm_device *dev)
	else
		location = TTM_PL_FLAG_TT;

-	ret = nouveau_bo_new(dev, NULL, 65536, 0, location, 0, 0x0000, false,
-			     true, &pushbuf);
+	ret = nouveau_bo_new(dev, NULL, 65536, 0, location, 0, 0x0000, &pushbuf);
	if (ret) {
		NV_ERROR(dev, "error allocating DMA push buffer: %d\n", ret);
		return NULL;
drivers/gpu/drm/nouveau/nouveau_connector.c +1 −0
@@ -507,6 +507,7 @@ nouveau_connector_native_mode(struct drm_connector *connector)
	int high_w = 0, high_h = 0, high_v = 0;

	list_for_each_entry(mode, &nv_connector->base.probed_modes, head) {
+		mode->vrefresh = drm_mode_vrefresh(mode);
		if (helper->mode_valid(connector, mode) != MODE_OK ||
		    (mode->flags & DRM_MODE_FLAG_INTERLACE))
			continue;
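
The added line caches the refresh rate of each probed mode before the surrounding loop compares resolutions and vrefresh values. drm_mode_vrefresh() derives the rate from the pixel clock and the raster totals; to first order (ignoring interlace and doublescan adjustments) it is clock * 1000 / (htotal * vtotal). A standalone check with a typical 1080p mode:

	#include <stdio.h>

	int main(void)
	{
		/* hypothetical 1080p mode: clock in kHz, totals in pixels/lines */
		unsigned int clock = 148500, htotal = 2200, vtotal = 1125;
		unsigned int vrefresh = clock * 1000 / (htotal * vtotal);
		printf("vrefresh = %u Hz\n", vrefresh);	/* prints 60 */
		return 0;
	}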