Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit d550c41e authored by Ben Skeggs
Browse files

drm/nouveau: remove no_vm/mappable flags from nouveau_bo



'mappable' isn't really used at all, nor is it necessary anymore as the
bo code is capable of moving buffers to mappable vram as required.

'no_vm' isn't necessary anymore either, any places that don't want to be
mapped into a GPU address space should allocate the VRAM directly instead.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
parent 2503c6fa
Loading
Loading
Loading
Loading
+24 −38
Original line number Original line Diff line number Diff line
@@ -98,8 +98,7 @@ nouveau_bo_fixup_align(struct nouveau_bo *nvbo, int *align, int *size,
int
int
nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
	       int size, int align, uint32_t flags, uint32_t tile_mode,
	       int size, int align, uint32_t flags, uint32_t tile_mode,
	       uint32_t tile_flags, bool no_vm, bool mappable,
	       uint32_t tile_flags, struct nouveau_bo **pnvbo)
	       struct nouveau_bo **pnvbo)
{
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_bo *nvbo;
	struct nouveau_bo *nvbo;
@@ -110,8 +109,6 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
		return -ENOMEM;
		return -ENOMEM;
	INIT_LIST_HEAD(&nvbo->head);
	INIT_LIST_HEAD(&nvbo->head);
	INIT_LIST_HEAD(&nvbo->entry);
	INIT_LIST_HEAD(&nvbo->entry);
	nvbo->mappable = mappable;
	nvbo->no_vm = no_vm;
	nvbo->tile_mode = tile_mode;
	nvbo->tile_mode = tile_mode;
	nvbo->tile_flags = tile_flags;
	nvbo->tile_flags = tile_flags;
	nvbo->bo.bdev = &dev_priv->ttm.bdev;
	nvbo->bo.bdev = &dev_priv->ttm.bdev;
@@ -119,7 +116,7 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
	nouveau_bo_fixup_align(nvbo, &align, &size, &page_shift);
	nouveau_bo_fixup_align(nvbo, &align, &size, &page_shift);
	align >>= PAGE_SHIFT;
	align >>= PAGE_SHIFT;


	if (!nvbo->no_vm && dev_priv->chan_vm) {
	if (dev_priv->chan_vm) {
		ret = nouveau_vm_get(dev_priv->chan_vm, size, page_shift,
		ret = nouveau_vm_get(dev_priv->chan_vm, size, page_shift,
				     NV_MEM_ACCESS_RW, &nvbo->vma);
				     NV_MEM_ACCESS_RW, &nvbo->vma);
		if (ret) {
		if (ret) {
@@ -504,14 +501,6 @@ static inline uint32_t
nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
		      struct nouveau_channel *chan, struct ttm_mem_reg *mem)
		      struct nouveau_channel *chan, struct ttm_mem_reg *mem)
{
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	if (nvbo->no_vm) {
		if (mem->mem_type == TTM_PL_TT)
			return NvDmaGART;
		return NvDmaVRAM;
	}

	if (mem->mem_type == TTM_PL_TT)
	if (mem->mem_type == TTM_PL_TT)
		return chan->gart_handle;
		return chan->gart_handle;
	return chan->vram_handle;
	return chan->vram_handle;
@@ -523,22 +512,21 @@ nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
{
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	u64 src_offset = old_mem->start << PAGE_SHIFT;
	u64 dst_offset = new_mem->start << PAGE_SHIFT;
	u32 page_count = new_mem->num_pages;
	u32 page_count = new_mem->num_pages;
	u64 src_offset, dst_offset;
	int ret;
	int ret;


	if (!nvbo->no_vm) {
	src_offset = old_mem->start << PAGE_SHIFT;
	if (old_mem->mem_type == TTM_PL_VRAM)
	if (old_mem->mem_type == TTM_PL_VRAM)
		src_offset  = nvbo->vma.offset;
		src_offset  = nvbo->vma.offset;
	else
	else
		src_offset += dev_priv->gart_info.aper_base;
		src_offset += dev_priv->gart_info.aper_base;


	dst_offset = new_mem->start << PAGE_SHIFT;
	if (new_mem->mem_type == TTM_PL_VRAM)
	if (new_mem->mem_type == TTM_PL_VRAM)
		dst_offset  = nvbo->vma.offset;
		dst_offset  = nvbo->vma.offset;
	else
	else
		dst_offset += dev_priv->gart_info.aper_base;
		dst_offset += dev_priv->gart_info.aper_base;
	}


	page_count = new_mem->num_pages;
	page_count = new_mem->num_pages;
	while (page_count) {
	while (page_count) {
@@ -580,18 +568,16 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
	int ret;
	int ret;


	src_offset = old_mem->start << PAGE_SHIFT;
	src_offset = old_mem->start << PAGE_SHIFT;
	dst_offset = new_mem->start << PAGE_SHIFT;
	if (!nvbo->no_vm) {
	if (old_mem->mem_type == TTM_PL_VRAM)
	if (old_mem->mem_type == TTM_PL_VRAM)
		src_offset  = nvbo->vma.offset;
		src_offset  = nvbo->vma.offset;
	else
	else
		src_offset += dev_priv->gart_info.aper_base;
		src_offset += dev_priv->gart_info.aper_base;


	dst_offset = new_mem->start << PAGE_SHIFT;
	if (new_mem->mem_type == TTM_PL_VRAM)
	if (new_mem->mem_type == TTM_PL_VRAM)
		dst_offset  = nvbo->vma.offset;
		dst_offset  = nvbo->vma.offset;
	else
	else
		dst_offset += dev_priv->gart_info.aper_base;
		dst_offset += dev_priv->gart_info.aper_base;
	}


	ret = RING_SPACE(chan, 3);
	ret = RING_SPACE(chan, 3);
	if (ret)
	if (ret)
@@ -737,7 +723,7 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
	int ret;
	int ret;


	chan = nvbo->channel;
	chan = nvbo->channel;
	if (!chan || nvbo->no_vm) {
	if (!chan) {
		chan = dev_priv->channel;
		chan = dev_priv->channel;
		mutex_lock_nested(&chan->mutex, NOUVEAU_KCHANNEL_MUTEX);
		mutex_lock_nested(&chan->mutex, NOUVEAU_KCHANNEL_MUTEX);
	}
	}
@@ -836,7 +822,7 @@ nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	uint64_t offset;
	uint64_t offset;


	if (nvbo->no_vm || new_mem->mem_type != TTM_PL_VRAM) {
	if (new_mem->mem_type != TTM_PL_VRAM) {
		/* Nothing to do. */
		/* Nothing to do. */
		*new_tile = NULL;
		*new_tile = NULL;
		return 0;
		return 0;
+1 −2
Original line number Original line Diff line number Diff line
@@ -90,8 +90,7 @@ nouveau_channel_user_pushbuf_alloc(struct drm_device *dev)
	else
	else
		location = TTM_PL_FLAG_TT;
		location = TTM_PL_FLAG_TT;


	ret = nouveau_bo_new(dev, NULL, 65536, 0, location, 0, 0x0000, false,
	ret = nouveau_bo_new(dev, NULL, 65536, 0, location, 0, 0x0000, &pushbuf);
			     true, &pushbuf);
	if (ret) {
	if (ret) {
		NV_ERROR(dev, "error allocating DMA push buffer: %d\n", ret);
		NV_ERROR(dev, "error allocating DMA push buffer: %d\n", ret);
		return NULL;
		return NULL;
+0 −2
Original line number Original line Diff line number Diff line
@@ -61,8 +61,6 @@ enum {
	NvM2MF		= 0x80000001,
	NvM2MF		= 0x80000001,
	NvDmaFB		= 0x80000002,
	NvDmaFB		= 0x80000002,
	NvDmaTT		= 0x80000003,
	NvDmaTT		= 0x80000003,
	NvDmaVRAM	= 0x80000004,
	NvDmaGART	= 0x80000005,
	NvNotify0       = 0x80000006,
	NvNotify0       = 0x80000006,
	Nv2D		= 0x80000007,
	Nv2D		= 0x80000007,
	NvCtxSurf2D	= 0x80000008,
	NvCtxSurf2D	= 0x80000008,
+2 −4
Original line number Original line Diff line number Diff line
@@ -104,8 +104,6 @@ struct nouveau_bo {
	struct nouveau_channel *channel;
	struct nouveau_channel *channel;


	struct nouveau_vma vma;
	struct nouveau_vma vma;
	bool mappable;
	bool no_vm;


	uint32_t tile_mode;
	uint32_t tile_mode;
	uint32_t tile_flags;
	uint32_t tile_flags;
@@ -1293,7 +1291,7 @@ extern struct ttm_bo_driver nouveau_bo_driver;
extern int nouveau_bo_new(struct drm_device *, struct nouveau_channel *,
extern int nouveau_bo_new(struct drm_device *, struct nouveau_channel *,
			  int size, int align, uint32_t flags,
			  int size, int align, uint32_t flags,
			  uint32_t tile_mode, uint32_t tile_flags,
			  uint32_t tile_mode, uint32_t tile_flags,
			  bool no_vm, bool mappable, struct nouveau_bo **);
			  struct nouveau_bo **);
extern int nouveau_bo_pin(struct nouveau_bo *, uint32_t flags);
extern int nouveau_bo_pin(struct nouveau_bo *, uint32_t flags);
extern int nouveau_bo_unpin(struct nouveau_bo *);
extern int nouveau_bo_unpin(struct nouveau_bo *);
extern int nouveau_bo_map(struct nouveau_bo *);
extern int nouveau_bo_map(struct nouveau_bo *);
@@ -1356,7 +1354,7 @@ static inline struct nouveau_fence *nouveau_fence_ref(struct nouveau_fence *obj)
extern int nouveau_gem_new(struct drm_device *, struct nouveau_channel *,
extern int nouveau_gem_new(struct drm_device *, struct nouveau_channel *,
			   int size, int align, uint32_t flags,
			   int size, int align, uint32_t flags,
			   uint32_t tile_mode, uint32_t tile_flags,
			   uint32_t tile_mode, uint32_t tile_flags,
			   bool no_vm, bool mappable, struct nouveau_bo **);
			   struct nouveau_bo **);
extern int nouveau_gem_object_new(struct drm_gem_object *);
extern int nouveau_gem_object_new(struct drm_gem_object *);
extern void nouveau_gem_object_del(struct drm_gem_object *);
extern void nouveau_gem_object_del(struct drm_gem_object *);
extern int nouveau_gem_ioctl_new(struct drm_device *, void *,
extern int nouveau_gem_ioctl_new(struct drm_device *, void *,
+1 −1
Original line number Original line Diff line number Diff line
@@ -297,7 +297,7 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
	size = roundup(size, PAGE_SIZE);
	size = roundup(size, PAGE_SIZE);


	ret = nouveau_gem_new(dev, dev_priv->channel, size, 0, TTM_PL_FLAG_VRAM,
	ret = nouveau_gem_new(dev, dev_priv->channel, size, 0, TTM_PL_FLAG_VRAM,
			      0, 0x0000, false, true, &nvbo);
			      0, 0x0000, &nvbo);
	if (ret) {
	if (ret) {
		NV_ERROR(dev, "failed to allocate framebuffer\n");
		NV_ERROR(dev, "failed to allocate framebuffer\n");
		goto out;
		goto out;
Loading