
Commit a1606a95 authored by Ben Skeggs

drm/nouveau: new gem pushbuf interface, bump to 0.0.16



This commit breaks the userspace interface, and requires a new libdrm for
nouveau to operate again.

The multiple GEM_PUSHBUF ioctls that were present in 0.0.15 for
compatibility purposes are gone, replaced by a single new ioctl that
allows multiple push buffers to be submitted (necessary for hw index
buffers in the nv50 3d driver) and relocations to be applied to any buffer.
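
As an illustration, here is a minimal userspace sketch of a submission
through the new ioctl. The struct and field names are taken from the
kernel side of this diff; the wrapper function itself, the file
descriptor and the bo/reloc list setup are hypothetical:

	/* Sketch only: submit two push buffers (e.g. a command stream plus
	 * a hw index buffer) in a single DRM_NOUVEAU_GEM_PUSHBUF call. */
	#include <stdint.h>
	#include <xf86drm.h>
	#include "nouveau_drm.h"

	static int
	submit_two_pushbufs(int fd, uint32_t channel,
			    struct drm_nouveau_gem_pushbuf_bo *bo_list,
			    uint32_t nr_bo, uint64_t ib_offset,
			    uint64_t cmd_len, uint64_t ib_len)
	{
		struct drm_nouveau_gem_pushbuf_push push[2] = {
			/* bo_index refers into the validated buffer list */
			{ .bo_index = 0, .offset = 0,         .length = cmd_len },
			{ .bo_index = 1, .offset = ib_offset, .length = ib_len },
		};
		struct drm_nouveau_gem_pushbuf req = {
			.channel    = channel,
			.nr_buffers = nr_bo,
			.buffers    = (uint64_t)(uintptr_t)bo_list,
			.nr_push    = 2,
			.push       = (uint64_t)(uintptr_t)push,
		};

		/* On success the kernel reports vram_available/gart_available
		 * back through req and updates each bo's presumed placement. */
		return drmCommandWriteRead(fd, DRM_NOUVEAU_GEM_PUSHBUF,
					   &req, sizeof(req));
	}

Relocations are likewise no longer tied to a single push buffer: each
drm_nouveau_gem_pushbuf_reloc now names the buffer it patches via
reloc_bo_index/reloc_bo_offset, as handled in
nouveau_gem_pushbuf_reloc_apply() below.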

A number of other ioctls (CARD_INIT, GEM_PIN, GEM_UNPIN) that were needed
for userspace modesetting have also been removed.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
Signed-off-by: Francisco Jerez <currojerez@riseup.net>
parent d87897d4
+8 −5
@@ -385,6 +385,14 @@ nouveau_ioctl_fifo_alloc(struct drm_device *dev, void *data,
 		return ret;
 	init->channel  = chan->id;
 
+	if (chan->dma.ib_max)
+		init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM |
+					NOUVEAU_GEM_DOMAIN_GART;
+	else if (chan->pushbuf_bo->bo.mem.mem_type == TTM_PL_VRAM)
+		init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM;
+	else
+		init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_GART;
+
 	init->subchan[0].handle = NvM2MF;
 	if (dev_priv->card_type < NV_50)
 		init->subchan[0].grclass = 0x0039;
@@ -424,7 +432,6 @@ nouveau_ioctl_fifo_free(struct drm_device *dev, void *data,
  ***********************************/
 
 struct drm_ioctl_desc nouveau_ioctls[] = {
-	DRM_IOCTL_DEF(DRM_NOUVEAU_CARD_INIT, nouveau_ioctl_card_init, DRM_AUTH),
 	DRM_IOCTL_DEF(DRM_NOUVEAU_GETPARAM, nouveau_ioctl_getparam, DRM_AUTH),
 	DRM_IOCTL_DEF(DRM_NOUVEAU_SETPARAM, nouveau_ioctl_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
 	DRM_IOCTL_DEF(DRM_NOUVEAU_CHANNEL_ALLOC, nouveau_ioctl_fifo_alloc, DRM_AUTH),
@@ -434,13 +441,9 @@ struct drm_ioctl_desc nouveau_ioctls[] = {
 	DRM_IOCTL_DEF(DRM_NOUVEAU_GPUOBJ_FREE, nouveau_ioctl_gpuobj_free, DRM_AUTH),
 	DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_AUTH),
 	DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_PUSHBUF_CALL, nouveau_gem_ioctl_pushbuf_call, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_PIN, nouveau_gem_ioctl_pin, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_UNPIN, nouveau_gem_ioctl_unpin, DRM_AUTH),
 	DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_AUTH),
 	DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_AUTH),
 	DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_PUSHBUF_CALL2, nouveau_gem_ioctl_pushbuf_call2, DRM_AUTH),
 };
 
 int nouveau_max_ioctl = DRM_ARRAY_SIZE(nouveau_ioctls);
+2 −2
@@ -179,7 +179,7 @@ READ_GET(struct nouveau_channel *chan, uint32_t *prev_get, uint32_t *timeout)
 
 void
 nv50_dma_push(struct nouveau_channel *chan, struct nouveau_bo *bo,
-	      int delta, int dwords)
+	      int delta, int length)
 {
 	struct nouveau_bo *pb = chan->pushbuf_bo;
 	uint64_t offset = bo->bo.offset + delta;
@@ -187,7 +187,7 @@ nv50_dma_push(struct nouveau_channel *chan, struct nouveau_bo *bo,
 
 	BUG_ON(chan->dma.ib_free < 1);
 	nouveau_bo_wr32(pb, ip++, lower_32_bits(offset));
-	nouveau_bo_wr32(pb, ip++, upper_32_bits(offset) | dwords << 10);
+	nouveau_bo_wr32(pb, ip++, upper_32_bits(offset) | length << 8);
 
 	chan->dma.ib_put = (chan->dma.ib_put + 1) & chan->dma.ib_max;
 	nvchan_wr32(chan, 0x8c, chan->dma.ib_put);
+2 −2
@@ -32,7 +32,7 @@
 #endif
 
 void nv50_dma_push(struct nouveau_channel *, struct nouveau_bo *,
-		   int delta, int dwords);
+		   int delta, int length);
 
 /*
  * There's a hw race condition where you can't jump to your PUT offset,
@@ -149,7 +149,7 @@ FIRE_RING(struct nouveau_channel *chan)
 
 	if (chan->dma.ib_max) {
 		nv50_dma_push(chan, chan->pushbuf_bo, chan->dma.put << 2,
-			      chan->dma.cur - chan->dma.put);
+			      (chan->dma.cur - chan->dma.put) << 2);
 	} else {
 		WRITE_PUT(chan->dma.cur);
 	}
+2 −17
@@ -34,7 +34,7 @@
 
 #define DRIVER_MAJOR		0
 #define DRIVER_MINOR		0
-#define DRIVER_PATCHLEVEL	15
+#define DRIVER_PATCHLEVEL	16
 
 #define NOUVEAU_FAMILY   0x0000FFFF
 #define NOUVEAU_FLAGS    0xFFFF0000
@@ -83,6 +83,7 @@ struct nouveau_bo {
 	struct drm_file *reserved_by;
 	struct list_head entry;
 	int pbbo_index;
+	bool validate_mapped;
 
 	struct nouveau_channel *channel;
 
@@ -704,12 +705,6 @@ extern bool nouveau_wait_until(struct drm_device *, uint64_t timeout,
 			       uint32_t reg, uint32_t mask, uint32_t val);
 extern bool nouveau_wait_for_idle(struct drm_device *);
 extern int  nouveau_card_init(struct drm_device *);
-extern int  nouveau_ioctl_card_init(struct drm_device *, void *data,
-				    struct drm_file *);
-extern int  nouveau_ioctl_suspend(struct drm_device *, void *data,
-				  struct drm_file *);
-extern int  nouveau_ioctl_resume(struct drm_device *, void *data,
-				 struct drm_file *);
 
 /* nouveau_mem.c */
 extern int  nouveau_mem_init_heap(struct mem_block **, uint64_t start,
@@ -1160,16 +1155,6 @@ extern int nouveau_gem_ioctl_new(struct drm_device *, void *,
 				 struct drm_file *);
 extern int nouveau_gem_ioctl_pushbuf(struct drm_device *, void *,
 				     struct drm_file *);
-extern int nouveau_gem_ioctl_pushbuf_call(struct drm_device *, void *,
-					  struct drm_file *);
-extern int nouveau_gem_ioctl_pushbuf_call2(struct drm_device *, void *,
-					   struct drm_file *);
-extern int nouveau_gem_ioctl_pin(struct drm_device *, void *,
-				 struct drm_file *);
-extern int nouveau_gem_ioctl_unpin(struct drm_device *, void *,
-				   struct drm_file *);
-extern int nouveau_gem_ioctl_tile(struct drm_device *, void *,
-				  struct drm_file *);
 extern int nouveau_gem_ioctl_cpu_prep(struct drm_device *, void *,
 				      struct drm_file *);
 extern int nouveau_gem_ioctl_cpu_fini(struct drm_device *, void *,
+162 −317
@@ -243,6 +243,11 @@ validate_fini_list(struct list_head *list, struct nouveau_fence *fence)
 			nouveau_fence_unref((void *)&prev_fence);
 		}
 
+		if (unlikely(nvbo->validate_mapped)) {
+			ttm_bo_kunmap(&nvbo->kmap);
+			nvbo->validate_mapped = false;
+		}
+
 		list_del(&nvbo->entry);
 		nvbo->reserved_by = NULL;
 		ttm_bo_unreserve(&nvbo->bo);
@@ -302,11 +307,14 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
 			if (ret == -EAGAIN)
 				ret = ttm_bo_wait_unreserved(&nvbo->bo, false);
 			drm_gem_object_unreference(gem);
-			if (ret)
+			if (ret) {
+				NV_ERROR(dev, "fail reserve\n");
 				return ret;
+			}
 			goto retry;
 		}
 
+		b->user_priv = (uint64_t)(unsigned long)nvbo;
 		nvbo->reserved_by = file_priv;
 		nvbo->pbbo_index = i;
 		if ((b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
@@ -336,8 +344,10 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
 			}
 
 			ret = ttm_bo_wait_cpu(&nvbo->bo, false);
-			if (ret)
+			if (ret) {
+				NV_ERROR(dev, "fail wait_cpu\n");
 				return ret;
+			}
 			goto retry;
 		}
 	}
@@ -351,6 +361,7 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
 {
 	struct drm_nouveau_gem_pushbuf_bo __user *upbbo =
 				(void __force __user *)(uintptr_t)user_pbbo_ptr;
+	struct drm_device *dev = chan->dev;
 	struct nouveau_bo *nvbo;
 	int ret, relocs = 0;
 
@@ -362,39 +373,46 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
 			spin_lock(&nvbo->bo.lock);
 			ret = ttm_bo_wait(&nvbo->bo, false, false, false);
 			spin_unlock(&nvbo->bo.lock);
-			if (unlikely(ret))
+			if (unlikely(ret)) {
+				NV_ERROR(dev, "fail wait other chan\n");
 				return ret;
+			}
 		}
 
 		ret = nouveau_gem_set_domain(nvbo->gem, b->read_domains,
 					     b->write_domains,
 					     b->valid_domains);
-		if (unlikely(ret))
+		if (unlikely(ret)) {
+			NV_ERROR(dev, "fail set_domain\n");
 			return ret;
+		}
 
 		nvbo->channel = chan;
 		ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement,
 				      false, false);
 		nvbo->channel = NULL;
-		if (unlikely(ret))
+		if (unlikely(ret)) {
+			NV_ERROR(dev, "fail ttm_validate\n");
 			return ret;
+		}
 
-		if (nvbo->bo.offset == b->presumed_offset &&
+		if (nvbo->bo.offset == b->presumed.offset &&
 		    ((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
-		      b->presumed_domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
+		      b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
 		     (nvbo->bo.mem.mem_type == TTM_PL_TT &&
-		      b->presumed_domain & NOUVEAU_GEM_DOMAIN_GART)))
+		      b->presumed.domain & NOUVEAU_GEM_DOMAIN_GART)))
 			continue;
 
 		if (nvbo->bo.mem.mem_type == TTM_PL_TT)
-			b->presumed_domain = NOUVEAU_GEM_DOMAIN_GART;
+			b->presumed.domain = NOUVEAU_GEM_DOMAIN_GART;
 		else
-			b->presumed_domain = NOUVEAU_GEM_DOMAIN_VRAM;
-		b->presumed_offset = nvbo->bo.offset;
-		b->presumed_ok = 0;
+			b->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM;
+		b->presumed.offset = nvbo->bo.offset;
+		b->presumed.valid = 0;
 		relocs++;
 
-		if (DRM_COPY_TO_USER(&upbbo[nvbo->pbbo_index], b, sizeof(*b)))
+		if (DRM_COPY_TO_USER(&upbbo[nvbo->pbbo_index].presumed,
+				     &b->presumed, sizeof(b->presumed)))
 			return -EFAULT;
 	}
 
@@ -408,6 +426,7 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
 			     uint64_t user_buffers, int nr_buffers,
 			     struct validate_op *op, int *apply_relocs)
 {
+	struct drm_device *dev = chan->dev;
 	int ret, relocs = 0;
 
 	INIT_LIST_HEAD(&op->vram_list);
@@ -418,11 +437,14 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
 		return 0;
 
 	ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
-	if (unlikely(ret))
+	if (unlikely(ret)) {
+		NV_ERROR(dev, "validate_init\n");
 		return ret;
+	}
 
 	ret = validate_list(chan, &op->vram_list, pbbo, user_buffers);
 	if (unlikely(ret < 0)) {
+		NV_ERROR(dev, "validate vram_list\n");
 		validate_fini(op, NULL);
 		return ret;
 	}
@@ -430,6 +452,7 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
 
 	ret = validate_list(chan, &op->gart_list, pbbo, user_buffers);
 	if (unlikely(ret < 0)) {
+		NV_ERROR(dev, "validate gart_list\n");
 		validate_fini(op, NULL);
 		return ret;
 	}
@@ -437,6 +460,7 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
 
 	ret = validate_list(chan, &op->both_list, pbbo, user_buffers);
 	if (unlikely(ret < 0)) {
+		NV_ERROR(dev, "validate both_list\n");
 		validate_fini(op, NULL);
 		return ret;
 	}
@@ -465,59 +489,82 @@ u_memcpya(uint64_t user, unsigned nmemb, unsigned size)
 }
 
 static int
-nouveau_gem_pushbuf_reloc_apply(struct nouveau_channel *chan, int nr_bo,
-				struct drm_nouveau_gem_pushbuf_bo *bo,
-				unsigned nr_relocs, uint64_t ptr_relocs,
-				unsigned nr_dwords, unsigned first_dword,
-				uint32_t *pushbuf, bool is_iomem)
+nouveau_gem_pushbuf_reloc_apply(struct drm_device *dev,
+				struct drm_nouveau_gem_pushbuf *req,
+				struct drm_nouveau_gem_pushbuf_bo *bo)
 {
 	struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
-	struct drm_device *dev = chan->dev;
 	int ret = 0;
 	unsigned i;
 
-	reloc = u_memcpya(ptr_relocs, nr_relocs, sizeof(*reloc));
+	reloc = u_memcpya(req->relocs, req->nr_relocs, sizeof(*reloc));
 	if (IS_ERR(reloc))
 		return PTR_ERR(reloc);
 
-	for (i = 0; i < nr_relocs; i++) {
+	for (i = 0; i < req->nr_relocs; i++) {
 		struct drm_nouveau_gem_pushbuf_reloc *r = &reloc[i];
 		struct drm_nouveau_gem_pushbuf_bo *b;
+		struct nouveau_bo *nvbo;
 		uint32_t data;
 
-		if (r->bo_index >= nr_bo || r->reloc_index < first_dword ||
-		    r->reloc_index >= first_dword + nr_dwords) {
-			NV_ERROR(dev, "Bad relocation %d\n", i);
-			NV_ERROR(dev, "  bo: %d max %d\n", r->bo_index, nr_bo);
-			NV_ERROR(dev, "  id: %d max %d\n", r->reloc_index, nr_dwords);
+		if (unlikely(r->bo_index > req->nr_buffers)) {
+			NV_ERROR(dev, "reloc bo index invalid\n");
 			ret = -EINVAL;
 			break;
 		}
 
 		b = &bo[r->bo_index];
-		if (b->presumed_ok)
+		if (b->presumed.valid)
 			continue;
 
+		if (unlikely(r->reloc_bo_index > req->nr_buffers)) {
+			NV_ERROR(dev, "reloc container bo index invalid\n");
+			ret = -EINVAL;
+			break;
+		}
+		nvbo = (void *)(unsigned long)bo[r->reloc_bo_index].user_priv;
+
+		if (unlikely(r->reloc_bo_offset + 4 >
+			     nvbo->bo.mem.num_pages << PAGE_SHIFT)) {
+			NV_ERROR(dev, "reloc outside of bo\n");
+			ret = -EINVAL;
+			break;
+		}
+
+		if (!nvbo->kmap.virtual) {
+			ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,
+					  &nvbo->kmap);
+			if (ret) {
+				NV_ERROR(dev, "failed kmap for reloc\n");
+				break;
+			}
+			nvbo->validate_mapped = true;
+		}
+
 		if (r->flags & NOUVEAU_GEM_RELOC_LOW)
-			data = b->presumed_offset + r->data;
+			data = b->presumed.offset + r->data;
 		else
 		if (r->flags & NOUVEAU_GEM_RELOC_HIGH)
-			data = (b->presumed_offset + r->data) >> 32;
+			data = (b->presumed.offset + r->data) >> 32;
 		else
 			data = r->data;
 
 		if (r->flags & NOUVEAU_GEM_RELOC_OR) {
-			if (b->presumed_domain == NOUVEAU_GEM_DOMAIN_GART)
+			if (b->presumed.domain == NOUVEAU_GEM_DOMAIN_GART)
 				data |= r->tor;
 			else
 				data |= r->vor;
 		}
 
-		if (is_iomem)
-			iowrite32_native(data, (void __force __iomem *)
-						&pushbuf[r->reloc_index]);
-		else
-			pushbuf[r->reloc_index] = data;
+		spin_lock(&nvbo->bo.lock);
+		ret = ttm_bo_wait(&nvbo->bo, false, false, false);
+		spin_unlock(&nvbo->bo.lock);
+		if (ret) {
+			NV_ERROR(dev, "reloc wait_idle failed: %d\n", ret);
+			break;
+		}
+
+		nouveau_bo_wr32(nvbo, r->reloc_bo_offset >> 2, data);
 	}
 
 	kfree(reloc);
@@ -528,125 +575,50 @@ int
 nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
 			  struct drm_file *file_priv)
 {
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct drm_nouveau_gem_pushbuf *req = data;
-	struct drm_nouveau_gem_pushbuf_bo *bo = NULL;
+	struct drm_nouveau_gem_pushbuf_push *push;
+	struct drm_nouveau_gem_pushbuf_bo *bo;
 	struct nouveau_channel *chan;
 	struct validate_op op;
 	struct nouveau_fence *fence = 0;
-	uint32_t *pushbuf = NULL;
-	int ret = 0, do_reloc = 0, i;
+	int i, j, ret = 0, do_reloc = 0;
 
 	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
 	NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(req->channel, file_priv, chan);
 
-	if (req->nr_dwords >= chan->dma.max ||
-	    req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS ||
-	    req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS) {
-		NV_ERROR(dev, "Pushbuf config exceeds limits:\n");
-		NV_ERROR(dev, "  dwords : %d max %d\n", req->nr_dwords,
-			 chan->dma.max - 1);
-		NV_ERROR(dev, "  buffers: %d max %d\n", req->nr_buffers,
-			 NOUVEAU_GEM_MAX_BUFFERS);
-		NV_ERROR(dev, "  relocs : %d max %d\n", req->nr_relocs,
-			 NOUVEAU_GEM_MAX_RELOCS);
-		return -EINVAL;
-	}
-
-	pushbuf = u_memcpya(req->dwords, req->nr_dwords, sizeof(uint32_t));
-	if (IS_ERR(pushbuf))
-		return PTR_ERR(pushbuf);
-
-	bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
-	if (IS_ERR(bo)) {
-		kfree(pushbuf);
-		return PTR_ERR(bo);
-	}
-
-	mutex_lock(&dev->struct_mutex);
-
-	/* Validate buffer list */
-	ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers,
-					   req->nr_buffers, &op, &do_reloc);
-	if (ret)
-		goto out;
-
-	/* Apply any relocations that are required */
-	if (do_reloc) {
-		ret = nouveau_gem_pushbuf_reloc_apply(chan, req->nr_buffers,
-						      bo, req->nr_relocs,
-						      req->relocs,
-						      req->nr_dwords, 0,
-						      pushbuf, false);
-		if (ret)
-			goto out;
-	}
-
-	/* Emit push buffer to the hw
-	 */
-	ret = RING_SPACE(chan, req->nr_dwords);
-	if (ret)
-		goto out;
-
-	OUT_RINGp(chan, pushbuf, req->nr_dwords);
-
-	ret = nouveau_fence_new(chan, &fence, true);
-	if (ret) {
-		NV_ERROR(dev, "error fencing pushbuf: %d\n", ret);
-		WIND_RING(chan);
-		goto out;
-	}
-
-	if (nouveau_gem_pushbuf_sync(chan)) {
-		ret = nouveau_fence_wait(fence, NULL, false, false);
-		if (ret) {
-			for (i = 0; i < req->nr_dwords; i++)
-				NV_ERROR(dev, "0x%08x\n", pushbuf[i]);
-			NV_ERROR(dev, "^^ above push buffer is fail :(\n");
-		}
-	}
-
-out:
-	validate_fini(&op, fence);
-	nouveau_fence_unref((void**)&fence);
-	mutex_unlock(&dev->struct_mutex);
-	kfree(pushbuf);
-	kfree(bo);
-	return ret;
-}
-
-int
-nouveau_gem_ioctl_pushbuf_call(struct drm_device *dev, void *data,
-			       struct drm_file *file_priv)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct drm_nouveau_gem_pushbuf_call *req = data;
-	struct drm_nouveau_gem_pushbuf_bo *bo = NULL;
-	struct nouveau_channel *chan;
-	struct drm_gem_object *gem;
-	struct nouveau_bo *pbbo;
-	struct validate_op op;
-	struct nouveau_fence* fence = 0;
-	int i, ret = 0, do_reloc = 0;
-
-	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
-	NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(req->channel, file_priv, chan);
-
-	if (unlikely(req->handle == 0))
+	req->vram_available = dev_priv->fb_aper_free;
+	req->gart_available = dev_priv->gart_info.aper_free;
+	if (unlikely(req->nr_push == 0))
 		goto out_next;
 
-	if (req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS ||
-	    req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS) {
-		NV_ERROR(dev, "Pushbuf config exceeds limits:\n");
-		NV_ERROR(dev, "  buffers: %d max %d\n", req->nr_buffers,
-			 NOUVEAU_GEM_MAX_BUFFERS);
-		NV_ERROR(dev, "  relocs : %d max %d\n", req->nr_relocs,
-			 NOUVEAU_GEM_MAX_RELOCS);
+	if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) {
+		NV_ERROR(dev, "pushbuf push count exceeds limit: %d max %d\n",
+			 req->nr_push, NOUVEAU_GEM_MAX_PUSH);
 		return -EINVAL;
 	}
 
+	if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) {
+		NV_ERROR(dev, "pushbuf bo count exceeds limit: %d max %d\n",
+			 req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS);
+		return -EINVAL;
+	}
+
+	if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) {
+		NV_ERROR(dev, "pushbuf reloc count exceeds limit: %d max %d\n",
+			 req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS);
+		return -EINVAL;
+	}
+
+	push = u_memcpya(req->push, req->nr_push, sizeof(*push));
+	if (IS_ERR(push))
+		return PTR_ERR(push);
+
 	bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
-	if (IS_ERR(bo))
+	if (IS_ERR(bo)) {
+		kfree(push);
 		return PTR_ERR(bo);
+	}
 
 	mutex_lock(&dev->struct_mutex);
 
@@ -658,94 +630,9 @@ nouveau_gem_ioctl_pushbuf_call(struct drm_device *dev, void *data,
 		goto out;
 	}
 
-	/* Validate DMA push buffer */
-	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
-	if (!gem) {
-		NV_ERROR(dev, "Unknown pb handle 0x%08x\n", req->handle);
-		ret = -EINVAL;
-		goto out;
-	}
-	pbbo = nouveau_gem_object(gem);
-
-	if ((req->offset & 3) || req->nr_dwords < 2 ||
-	    (unsigned long)req->offset > (unsigned long)pbbo->bo.mem.size ||
-	    (unsigned long)req->nr_dwords >
-	     ((unsigned long)(pbbo->bo.mem.size - req->offset ) >> 2)) {
-		NV_ERROR(dev, "pb call misaligned or out of bounds: "
-			      "%d + %d * 4 > %ld\n",
-			 req->offset, req->nr_dwords, pbbo->bo.mem.size);
-		ret = -EINVAL;
-		drm_gem_object_unreference(gem);
-		goto out;
-	}
-
-	ret = ttm_bo_reserve(&pbbo->bo, false, false, true,
-			     chan->fence.sequence);
-	if (ret) {
-		NV_ERROR(dev, "resv pb: %d\n", ret);
-		drm_gem_object_unreference(gem);
-		goto out;
-	}
-
-	nouveau_bo_placement_set(pbbo, 1 << chan->pushbuf_bo->bo.mem.mem_type);
-	ret = ttm_bo_validate(&pbbo->bo, &pbbo->placement, false, false);
-	if (ret) {
-		NV_ERROR(dev, "validate pb: %d\n", ret);
-		ttm_bo_unreserve(&pbbo->bo);
-		drm_gem_object_unreference(gem);
-		goto out;
-	}
-
-	list_add_tail(&pbbo->entry, &op.both_list);
-
-	/* If presumed return address doesn't match, we need to map the
-	 * push buffer and fix it..
-	 */
-	if (dev_priv->card_type < NV_20) {
-		uint32_t retaddy;
-
-		if (chan->dma.free < 4 + NOUVEAU_DMA_SKIPS) {
-			ret = nouveau_dma_wait(chan, 0, 4 + NOUVEAU_DMA_SKIPS);
-			if (ret) {
-				NV_ERROR(dev, "jmp_space: %d\n", ret);
-				goto out;
-			}
-		}
-
-		retaddy  = chan->pushbuf_base + ((chan->dma.cur + 2) << 2);
-		retaddy |= 0x20000000;
-		if (retaddy != req->suffix0) {
-			req->suffix0 = retaddy;
-			do_reloc = 1;
-		}
-	}
-
 	/* Apply any relocations that are required */
 	if (do_reloc) {
-		void *pbvirt;
-		bool is_iomem;
-		ret = ttm_bo_kmap(&pbbo->bo, 0, pbbo->bo.mem.num_pages,
-				  &pbbo->kmap);
-		if (ret) {
-			NV_ERROR(dev, "kmap pb: %d\n", ret);
-			goto out;
-		}
-
-		pbvirt = ttm_kmap_obj_virtual(&pbbo->kmap, &is_iomem);
-		ret = nouveau_gem_pushbuf_reloc_apply(chan, req->nr_buffers, bo,
-						      req->nr_relocs,
-						      req->relocs,
-						      req->nr_dwords,
-						      req->offset / 4,
-						      pbvirt, is_iomem);
-
-		if (dev_priv->card_type < NV_20) {
-			nouveau_bo_wr32(pbbo,
-					req->offset / 4 + req->nr_dwords - 2,
-					req->suffix0);
-		}
-
-		ttm_bo_kunmap(&pbbo->kmap);
+		ret = nouveau_gem_pushbuf_reloc_apply(dev, req, bo);
 		if (ret) {
 			NV_ERROR(dev, "reloc apply: %d\n", ret);
 			goto out;
@@ -753,37 +640,75 @@ nouveau_gem_ioctl_pushbuf_call(struct drm_device *dev, void *data,
 	}
 
 	if (chan->dma.ib_max) {
-		ret = nouveau_dma_wait(chan, 2, 6);
+		ret = nouveau_dma_wait(chan, req->nr_push + 1, 6);
 		if (ret) {
 			NV_INFO(dev, "nv50cal_space: %d\n", ret);
 			goto out;
 		}
 
-		nv50_dma_push(chan, pbbo, req->offset, req->nr_dwords);
+		for (i = 0; i < req->nr_push; i++) {
+			struct nouveau_bo *nvbo = (void *)(unsigned long)
+				bo[push[i].bo_index].user_priv;
+
+			nv50_dma_push(chan, nvbo, push[i].offset,
+				      push[i].length);
+		}
 	} else
 	if (dev_priv->card_type >= NV_20) {
-		ret = RING_SPACE(chan, 2);
+		ret = RING_SPACE(chan, req->nr_push * 2);
 		if (ret) {
 			NV_ERROR(dev, "cal_space: %d\n", ret);
 			goto out;
 		}
-		OUT_RING(chan, ((pbbo->bo.mem.mm_node->start << PAGE_SHIFT) +
-				  req->offset) | 2);
-		OUT_RING(chan, 0);
+
+		for (i = 0; i < req->nr_push; i++) {
+			struct nouveau_bo *nvbo = (void *)(unsigned long)
+				bo[push[i].bo_index].user_priv;
+			struct drm_mm_node *mem = nvbo->bo.mem.mm_node;
+
+			OUT_RING(chan, ((mem->start << PAGE_SHIFT) +
+					push[i].offset) | 2);
+			OUT_RING(chan, 0);
+		}
 	} else {
-		ret = RING_SPACE(chan, 2 + NOUVEAU_DMA_SKIPS);
+		ret = RING_SPACE(chan, req->nr_push * (2 + NOUVEAU_DMA_SKIPS));
 		if (ret) {
 			NV_ERROR(dev, "jmp_space: %d\n", ret);
 			goto out;
 		}
-		OUT_RING(chan, ((pbbo->bo.mem.mm_node->start << PAGE_SHIFT) +
-				  req->offset) | 0x20000000);
-		OUT_RING(chan, 0);
 
-		/* Space the jumps apart with NOPs. */
-		for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
-			OUT_RING(chan, 0);
+		for (i = 0; i < req->nr_push; i++) {
+			struct nouveau_bo *nvbo = (void *)(unsigned long)
+				bo[push[i].bo_index].user_priv;
+			struct drm_mm_node *mem = nvbo->bo.mem.mm_node;
+			uint32_t cmd;
+
+			cmd = chan->pushbuf_base + ((chan->dma.cur + 2) << 2);
+			cmd |= 0x20000000;
+			if (unlikely(cmd != req->suffix0)) {
+				if (!nvbo->kmap.virtual) {
+					ret = ttm_bo_kmap(&nvbo->bo, 0,
+							  nvbo->bo.mem.
+							  num_pages,
+							  &nvbo->kmap);
+					if (ret) {
+						WIND_RING(chan);
+						goto out;
+					}
+					nvbo->validate_mapped = true;
+				}
+
+				nouveau_bo_wr32(nvbo, (push[i].offset +
+						push[i].length - 8) / 4, cmd);
+			}
+
+			OUT_RING(chan, ((mem->start << PAGE_SHIFT) +
+					push[i].offset) | 0x20000000);
+			OUT_RING(chan, 0);
+			for (j = 0; j < NOUVEAU_DMA_SKIPS; j++)
+				OUT_RING(chan, 0);
+		}
 	}
 
 	ret = nouveau_fence_new(chan, &fence, true);
 	if (ret) {
@@ -797,6 +722,7 @@ nouveau_gem_ioctl_pushbuf_call(struct drm_device *dev, void *data,
 	nouveau_fence_unref((void**)&fence);
 	mutex_unlock(&dev->struct_mutex);
 	kfree(bo);
+	kfree(push);
 
 out_next:
 	if (chan->dma.ib_max) {
@@ -815,19 +741,6 @@ nouveau_gem_ioctl_pushbuf_call(struct drm_device *dev, void *data,
 	return ret;
 }
 
-int
-nouveau_gem_ioctl_pushbuf_call2(struct drm_device *dev, void *data,
-				struct drm_file *file_priv)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct drm_nouveau_gem_pushbuf_call *req = data;
-
-	req->vram_available = dev_priv->fb_aper_free;
-	req->gart_available = dev_priv->gart_info.aper_free;
-
-	return nouveau_gem_ioctl_pushbuf_call(dev, data, file_priv);
-}
-
 static inline uint32_t
 domain_to_ttm(struct nouveau_bo *nvbo, uint32_t domain)
 {
@@ -841,74 +754,6 @@ domain_to_ttm(struct nouveau_bo *nvbo, uint32_t domain)
 	return flags;
 }
 
-int
-nouveau_gem_ioctl_pin(struct drm_device *dev, void *data,
-		      struct drm_file *file_priv)
-{
-	struct drm_nouveau_gem_pin *req = data;
-	struct drm_gem_object *gem;
-	struct nouveau_bo *nvbo;
-	int ret = 0;
-
-	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
-
-	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-		NV_ERROR(dev, "pin only allowed without kernel modesetting\n");
-		return -EINVAL;
-	}
-
-	if (!DRM_SUSER(DRM_CURPROC))
-		return -EPERM;
-
-	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
-	if (!gem)
-		return -EINVAL;
-	nvbo = nouveau_gem_object(gem);
-
-	ret = nouveau_bo_pin(nvbo, domain_to_ttm(nvbo, req->domain));
-	if (ret)
-		goto out;
-
-	req->offset = nvbo->bo.offset;
-	if (nvbo->bo.mem.mem_type == TTM_PL_TT)
-		req->domain = NOUVEAU_GEM_DOMAIN_GART;
-	else
-		req->domain = NOUVEAU_GEM_DOMAIN_VRAM;
-
-out:
-	mutex_lock(&dev->struct_mutex);
-	drm_gem_object_unreference(gem);
-	mutex_unlock(&dev->struct_mutex);
-
-	return ret;
-}
-
-int
-nouveau_gem_ioctl_unpin(struct drm_device *dev, void *data,
-			struct drm_file *file_priv)
-{
-	struct drm_nouveau_gem_pin *req = data;
-	struct drm_gem_object *gem;
-	int ret;
-
-	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
-
-	if (drm_core_check_feature(dev, DRIVER_MODESET))
-		return -EINVAL;
-
-	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
-	if (!gem)
-		return -EINVAL;
-
-	ret = nouveau_bo_unpin(nouveau_gem_object(gem));
-
-	mutex_lock(&dev->struct_mutex);
-	drm_gem_object_unreference(gem);
-	mutex_unlock(&dev->struct_mutex);
-
-	return ret;
-}
-
 int
 nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
 			   struct drm_file *file_priv)