Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit dc9be8e2 authored by Dave Airlie's avatar Dave Airlie
Browse files

Merge branch 'drm-next-4.2-amdgpu' of git://people.freedesktop.org/~agd5f/linux into drm-next

More fixes for amdgpu for 4.2.  We've integrated Jerome's comments
about the interface, among other things.  I'll be on vacation next week,
so Christian will be handling any updates during that time.

* 'drm-next-4.2-amdgpu' of git://people.freedesktop.org/~agd5f/linux: (23 commits)
  drm/amdgpu: fix a amdgpu_dpm=0 bug
  drm/amdgpu: don't enable/disable display twice on suspend/resume
  drm/amdgpu: fix UVD/VCE VM emulation
  drm/amdgpu: enable vce powergating
  drm/amdgpu/iceland: don't call smu_init on resume
  drm/amdgpu/tonga: don't call smu_init on resume
  drm/amdgpu/cz: don't call smu_init on resume
  drm/amdgpu: update to latest gfx8 golden register settings
  drm/amdgpu: whitespace cleanup in gmc8 golden regs
  drm/amdgpu: move XDMA golden registers to dce code
  drm/amdgpu: fix the build on big endian
  drm/amdgpu: cleanup UAPI comments
  drm/amdgpu: remove AMDGPU_CTX_OP_STATE_RUNNING
  drm/amdgpu: remove the VI hardware semaphore in ring sync
  drm/amdgpu: set the gfx config properly for all CZ variants (v2)
  drm/amdgpu: also print the pci revision when printing the pci ids
  drm/amdgpu: cleanup VA IOCTL
  drm/amdgpu: fix saddr handling in amdgpu_vm_bo_unmap
  drm/amdgpu: fix amdgpu_vm_bo_map
  drm/amdgpu: remove unused AMDGPU_IB_FLAG_GDS
  ...
parents ae455773 6d8db6ce
Loading
Loading
Loading
Loading
+7 −2
Original line number Diff line number Diff line
@@ -317,7 +317,7 @@ struct amdgpu_ring_funcs {
	void (*emit_ib)(struct amdgpu_ring *ring,
			struct amdgpu_ib *ib);
	void (*emit_fence)(struct amdgpu_ring *ring, uint64_t addr,
			   uint64_t seq, bool write64bit);
			   uint64_t seq, unsigned flags);
	bool (*emit_semaphore)(struct amdgpu_ring *ring,
			       struct amdgpu_semaphore *semaphore,
			       bool emit_wait);
@@ -392,6 +392,9 @@ struct amdgpu_fence_driver {
#define AMDGPU_FENCE_OWNER_VM		((void*)1ul)
#define AMDGPU_FENCE_OWNER_MOVE		((void*)2ul)

#define AMDGPU_FENCE_FLAG_64BIT         (1 << 0)
#define AMDGPU_FENCE_FLAG_INT           (1 << 1)

struct amdgpu_fence {
	struct fence base;

@@ -1506,6 +1509,7 @@ struct amdgpu_dpm_funcs {
	int (*force_performance_level)(struct amdgpu_device *adev, enum amdgpu_dpm_forced_level level);
	bool (*vblank_too_short)(struct amdgpu_device *adev);
	void (*powergate_uvd)(struct amdgpu_device *adev, bool gate);
	void (*powergate_vce)(struct amdgpu_device *adev, bool gate);
	void (*enable_bapm)(struct amdgpu_device *adev, bool enable);
	void (*set_fan_control_mode)(struct amdgpu_device *adev, u32 mode);
	u32 (*get_fan_control_mode)(struct amdgpu_device *adev);
@@ -2142,7 +2146,7 @@ static inline void amdgpu_ring_write(struct amdgpu_ring *ring, uint32_t v)
#define amdgpu_ring_set_wptr(r) (r)->funcs->set_wptr((r))
#define amdgpu_ring_emit_ib(r, ib) (r)->funcs->emit_ib((r), (ib))
#define amdgpu_ring_emit_vm_flush(r, vmid, addr) (r)->funcs->emit_vm_flush((r), (vmid), (addr))
#define amdgpu_ring_emit_fence(r, addr, seq, write64bit) (r)->funcs->emit_fence((r), (addr), (seq), (write64bit))
#define amdgpu_ring_emit_fence(r, addr, seq, flags) (r)->funcs->emit_fence((r), (addr), (seq), (flags))
#define amdgpu_ring_emit_semaphore(r, semaphore, emit_wait) (r)->funcs->emit_semaphore((r), (semaphore), (emit_wait))
#define amdgpu_ring_emit_gds_switch(r, v, db, ds, wb, ws, ab, as) (r)->funcs->emit_gds_switch((r), (v), (db), (ds), (wb), (ws), (ab), (as))
#define amdgpu_ring_emit_hdp_flush(r) (r)->funcs->emit_hdp_flush((r))
@@ -2179,6 +2183,7 @@ static inline void amdgpu_ring_write(struct amdgpu_ring *ring, uint32_t v)
#define amdgpu_dpm_force_performance_level(adev, l) (adev)->pm.funcs->force_performance_level((adev), (l))
#define amdgpu_dpm_vblank_too_short(adev) (adev)->pm.funcs->vblank_too_short((adev))
#define amdgpu_dpm_powergate_uvd(adev, g) (adev)->pm.funcs->powergate_uvd((adev), (g))
#define amdgpu_dpm_powergate_vce(adev, g) (adev)->pm.funcs->powergate_vce((adev), (g))
#define amdgpu_dpm_enable_bapm(adev, e) (adev)->pm.funcs->enable_bapm((adev), (e))
#define amdgpu_dpm_set_fan_control_mode(adev, m) (adev)->pm.funcs->set_fan_control_mode((adev), (m))
#define amdgpu_dpm_get_fan_control_mode(adev) (adev)->pm.funcs->get_fan_control_mode((adev))
+15 −3
Original line number Diff line number Diff line
@@ -564,21 +564,33 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
			return r;

		if (ring->funcs->parse_cs) {
			struct amdgpu_bo_va_mapping *m;
			struct amdgpu_bo *aobj = NULL;
			void *kptr;
			uint64_t offset;
			uint8_t *kptr;

			amdgpu_cs_find_mapping(parser, chunk_ib->va_start, &aobj);
			m = amdgpu_cs_find_mapping(parser, chunk_ib->va_start,
						   &aobj);
			if (!aobj) {
				DRM_ERROR("IB va_start is invalid\n");
				return -EINVAL;
			}

			if ((chunk_ib->va_start + chunk_ib->ib_bytes) >
			    (m->it.last + 1) * AMDGPU_GPU_PAGE_SIZE) {
				DRM_ERROR("IB va_start+ib_bytes is invalid\n");
				return -EINVAL;
			}

			/* the IB should be reserved at this point */
			r = amdgpu_bo_kmap(aobj, &kptr);
			r = amdgpu_bo_kmap(aobj, (void **)&kptr);
			if (r) {
				return r;
			}

			offset = ((uint64_t)m->it.start) * AMDGPU_GPU_PAGE_SIZE;
			kptr += chunk_ib->va_start - offset;

			r =  amdgpu_ib_get(ring, NULL, chunk_ib->ib_bytes, ib);
			if (r) {
				DRM_ERROR("Failed to get ib !\n");
+3 −3
Original line number Diff line number Diff line
@@ -1388,9 +1388,9 @@ int amdgpu_device_init(struct amdgpu_device *adev,
	adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
	adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;

	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X).\n",
	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
		 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
		pdev->subsystem_vendor, pdev->subsystem_device);
		 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);

	/* mutex initialization are all done here so we
	 * can recall function without having locking issues */
+7 −1
Original line number Diff line number Diff line
@@ -128,7 +128,9 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, void *owner,
	fence_init(&(*fence)->base, &amdgpu_fence_ops,
		&adev->fence_queue.lock, adev->fence_context + ring->idx,
		(*fence)->seq);
	amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr, (*fence)->seq, false);
	amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
			       (*fence)->seq,
			       AMDGPU_FENCE_FLAG_INT);
	trace_amdgpu_fence_emit(ring->adev->ddev, ring->idx, (*fence)->seq);
	return 0;
}
@@ -522,6 +524,10 @@ long amdgpu_fence_wait_seq_timeout(struct amdgpu_device *adev, u64 *target_seq,
	bool signaled;
	int i, r;

	if (timeout == 0) {
		return amdgpu_fence_any_seq_signaled(adev, target_seq);
	}

	while (!amdgpu_fence_any_seq_signaled(adev, target_seq)) {

		/* Save current sequence values, used to check for GPU lockups */
+22 −43
Original line number Diff line number Diff line
@@ -37,6 +37,7 @@ void amdgpu_gem_object_free(struct drm_gem_object *gobj)
	if (robj) {
		if (robj->gem_base.import_attach)
			drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
		amdgpu_mn_unregister(robj);
		amdgpu_bo_unref(&robj);
	}
}
@@ -504,7 +505,7 @@ error_free:
int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	union drm_amdgpu_gem_va *args = data;
	struct drm_amdgpu_gem_va *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
@@ -513,95 +514,73 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
	uint32_t invalid_flags, va_flags = 0;
	int r = 0;

	if (!adev->vm_manager.enabled) {
		memset(args, 0, sizeof(*args));
		args->out.result = AMDGPU_VA_RESULT_ERROR;
	if (!adev->vm_manager.enabled)
		return -ENOTTY;
	}

	if (args->in.va_address < AMDGPU_VA_RESERVED_SIZE) {
	if (args->va_address < AMDGPU_VA_RESERVED_SIZE) {
		dev_err(&dev->pdev->dev,
			"va_address 0x%lX is in reserved area 0x%X\n",
			(unsigned long)args->in.va_address,
			(unsigned long)args->va_address,
			AMDGPU_VA_RESERVED_SIZE);
		memset(args, 0, sizeof(*args));
		args->out.result = AMDGPU_VA_RESULT_ERROR;
		return -EINVAL;
	}

	invalid_flags = ~(AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE |
			AMDGPU_VM_PAGE_EXECUTABLE);
	if ((args->in.flags & invalid_flags)) {
	if ((args->flags & invalid_flags)) {
		dev_err(&dev->pdev->dev, "invalid flags 0x%08X vs 0x%08X\n",
			args->in.flags, invalid_flags);
		memset(args, 0, sizeof(*args));
		args->out.result = AMDGPU_VA_RESULT_ERROR;
			args->flags, invalid_flags);
		return -EINVAL;
	}

	switch (args->in.operation) {
	switch (args->operation) {
	case AMDGPU_VA_OP_MAP:
	case AMDGPU_VA_OP_UNMAP:
		break;
	default:
		dev_err(&dev->pdev->dev, "unsupported operation %d\n",
			args->in.operation);
		memset(args, 0, sizeof(*args));
		args->out.result = AMDGPU_VA_RESULT_ERROR;
			args->operation);
		return -EINVAL;
	}

	gobj = drm_gem_object_lookup(dev, filp, args->in.handle);
	if (gobj == NULL) {
		memset(args, 0, sizeof(*args));
		args->out.result = AMDGPU_VA_RESULT_ERROR;
	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	}

	rbo = gem_to_amdgpu_bo(gobj);
	r = amdgpu_bo_reserve(rbo, false);
	if (r) {
		if (r != -ERESTARTSYS) {
			memset(args, 0, sizeof(*args));
			args->out.result = AMDGPU_VA_RESULT_ERROR;
		}
		drm_gem_object_unreference_unlocked(gobj);
		return r;
	}

	bo_va = amdgpu_vm_bo_find(&fpriv->vm, rbo);
	if (!bo_va) {
		memset(args, 0, sizeof(*args));
		args->out.result = AMDGPU_VA_RESULT_ERROR;
		drm_gem_object_unreference_unlocked(gobj);
		amdgpu_bo_unreserve(rbo);
		return -ENOENT;
	}

	switch (args->in.operation) {
	switch (args->operation) {
	case AMDGPU_VA_OP_MAP:
		if (args->in.flags & AMDGPU_VM_PAGE_READABLE)
		if (args->flags & AMDGPU_VM_PAGE_READABLE)
			va_flags |= AMDGPU_PTE_READABLE;
		if (args->in.flags & AMDGPU_VM_PAGE_WRITEABLE)
		if (args->flags & AMDGPU_VM_PAGE_WRITEABLE)
			va_flags |= AMDGPU_PTE_WRITEABLE;
		if (args->in.flags & AMDGPU_VM_PAGE_EXECUTABLE)
		if (args->flags & AMDGPU_VM_PAGE_EXECUTABLE)
			va_flags |= AMDGPU_PTE_EXECUTABLE;
		r = amdgpu_vm_bo_map(adev, bo_va, args->in.va_address,
				     args->in.offset_in_bo, args->in.map_size,
		r = amdgpu_vm_bo_map(adev, bo_va, args->va_address,
				     args->offset_in_bo, args->map_size,
				     va_flags);
		break;
	case AMDGPU_VA_OP_UNMAP:
		r = amdgpu_vm_bo_unmap(adev, bo_va, args->in.va_address);
		r = amdgpu_vm_bo_unmap(adev, bo_va, args->va_address);
		break;
	default:
		break;
	}

	if (!r) {
	if (!r)
		amdgpu_gem_va_update_vm(adev, bo_va);
		memset(args, 0, sizeof(*args));
		args->out.result = AMDGPU_VA_RESULT_OK;
	} else {
		memset(args, 0, sizeof(*args));
		args->out.result = AMDGPU_VA_RESULT_ERROR;
	}

	drm_gem_object_unreference_unlocked(gobj);
	return r;
Loading