
Commit 637319c6 authored by Dave Airlie

Merge branch 'drm-next-4.19' of git://people.freedesktop.org/~agd5f/linux into drm-next

Fixes for 4.19:
- Add VCN PSP FW loading for RV (this is required on upcoming parts)
- Fix scheduler setup ordering for VCE and UVD
- Few misc display fixes

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexdeucher@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180816181840.2786-1-alexander.deucher@amd.com
parents d32e2c6d c9533d1b
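
The scheduler-ordering fix in this series works by splitting drm_sched_entity_init() out of the UVD/VCE sw_init paths (see the UVD and VCE hunks below) into new amdgpu_uvd_entity_init()/amdgpu_vce_entity_init() helpers, so the entity is only created once the ring's scheduler actually exists. A minimal sketch of the intended call order from an IP-block sw_init; the function name uvd_vX_0_sw_init and the ring-init parameters are illustrative, not taken from this series:

static int uvd_vX_0_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->uvd.inst[0].ring;
	int r;

	r = amdgpu_uvd_sw_init(adev);	/* no longer touches ring->sched */
	if (r)
		return r;

	/* ring init is what actually brings up the ring's GPU scheduler */
	r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst[0].irq, 0);
	if (r)
		return r;

	/* only now is ring->sched.sched_rq[] safe to hand to an entity */
	return amdgpu_uvd_entity_init(adev);
}
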
+5 −0
@@ -131,6 +131,11 @@ psp_cmd_submit_buf(struct psp_context *psp,
 		msleep(1);
 	}
 
+	if (ucode) {
+		ucode->tmr_mc_addr_lo = psp->cmd_buf_mem->resp.fw_addr_lo;
+		ucode->tmr_mc_addr_hi = psp->cmd_buf_mem->resp.fw_addr_hi;
+	}
+
 	return ret;
 }
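
The address pair captured above is the piece VCN needs: once the PSP has placed a ucode in its TMR, the owning block can point its VCPU cache at that PSP-managed copy rather than at the firmware BO. A rough sketch of such a consumer, assuming hypothetical register defines (mmVCPU_CACHE_BAR_LOW/HIGH are placeholders, not real amdgpu registers):

if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
	struct amdgpu_firmware_info *info =
		&adev->firmware.ucode[AMDGPU_UCODE_ID_VCN];

	/* point the VCPU cache at the PSP-managed TMR copy */
	WREG32(mmVCPU_CACHE_BAR_LOW, info->tmr_mc_addr_lo);
	WREG32(mmVCPU_CACHE_BAR_HIGH, info->tmr_mc_addr_hi);
}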

+4 −0
@@ -194,6 +194,7 @@ enum AMDGPU_UCODE_ID {
 	AMDGPU_UCODE_ID_SMC,
 	AMDGPU_UCODE_ID_UVD,
 	AMDGPU_UCODE_ID_VCE,
+	AMDGPU_UCODE_ID_VCN,
 	AMDGPU_UCODE_ID_MAXIMUM,
 };
 
@@ -226,6 +227,9 @@ struct amdgpu_firmware_info {
 	void *kaddr;
 	/* ucode_size_bytes */
 	uint32_t ucode_size;
+	/* starting tmr mc address */
+	uint32_t tmr_mc_addr_lo;
+	uint32_t tmr_mc_addr_hi;
 };
 
 void amdgpu_ucode_print_mc_hdr(const struct common_firmware_header *hdr);
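
With AMDGPU_UCODE_ID_VCN in the enum, VCN firmware can be queued for PSP loading the same way other blocks do it. A sketch of the usual registration pattern, modelled on how existing amdgpu blocks populate adev->firmware.ucode[] (not part of this diff):

if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
	const struct common_firmware_header *hdr =
		(const struct common_firmware_header *)adev->vcn.fw->data;
	struct amdgpu_firmware_info *info =
		&adev->firmware.ucode[AMDGPU_UCODE_ID_VCN];

	/* hand the raw firmware to the PSP loader and account for it */
	info->ucode_id = AMDGPU_UCODE_ID_VCN;
	info->fw = adev->vcn.fw;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
}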
+26 −12
@@ -122,8 +122,6 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work);
 
 int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
 {
-	struct amdgpu_ring *ring;
-	struct drm_sched_rq *rq;
 	unsigned long bo_size;
 	const char *fw_name;
 	const struct common_firmware_header *hdr;
@@ -266,13 +264,6 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
 		}
 	}
 
-	ring = &adev->uvd.inst[0].ring;
-	rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
-	r = drm_sched_entity_init(&adev->uvd.entity, &rq, 1, NULL);
-	if (r) {
-		DRM_ERROR("Failed setting up UVD kernel entity.\n");
-		return r;
-	}
 	for (i = 0; i < adev->uvd.max_handles; ++i) {
 		atomic_set(&adev->uvd.handles[i], 0);
 		adev->uvd.filp[i] = NULL;
@@ -311,7 +302,7 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
 	for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
 		if (adev->uvd.harvest_config & (1 << j))
 			continue;
-		kfree(adev->uvd.inst[j].saved_bo);
+		kvfree(adev->uvd.inst[j].saved_bo);
 
 		amdgpu_bo_free_kernel(&adev->uvd.inst[j].vcpu_bo,
 				      &adev->uvd.inst[j].gpu_addr,
@@ -327,6 +318,29 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
 	return 0;
 }
 
+/**
+ * amdgpu_uvd_entity_init - init entity
+ *
+ * @adev: amdgpu_device pointer
+ *
+ */
+int amdgpu_uvd_entity_init(struct amdgpu_device *adev)
+{
+	struct amdgpu_ring *ring;
+	struct drm_sched_rq *rq;
+	int r;
+
+	ring = &adev->uvd.inst[0].ring;
+	rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
+	r = drm_sched_entity_init(&adev->uvd.entity, &rq, 1, NULL);
+	if (r) {
+		DRM_ERROR("Failed setting up UVD kernel entity.\n");
+		return r;
+	}
+
+	return 0;
+}
+
 int amdgpu_uvd_suspend(struct amdgpu_device *adev)
 {
 	unsigned size;
@@ -354,7 +368,7 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev)
 		size = amdgpu_bo_size(adev->uvd.inst[j].vcpu_bo);
 		ptr = adev->uvd.inst[j].cpu_addr;
 
-		adev->uvd.inst[j].saved_bo = kmalloc(size, GFP_KERNEL);
+		adev->uvd.inst[j].saved_bo = kvmalloc(size, GFP_KERNEL);
 		if (!adev->uvd.inst[j].saved_bo)
 			return -ENOMEM;
 
@@ -380,7 +394,7 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev)
 
 		if (adev->uvd.inst[i].saved_bo != NULL) {
 			memcpy_toio(ptr, adev->uvd.inst[i].saved_bo, size);
-			kfree(adev->uvd.inst[i].saved_bo);
+			kvfree(adev->uvd.inst[i].saved_bo);
 			adev->uvd.inst[i].saved_bo = NULL;
 		} else {
 			const struct common_firmware_header *hdr;
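
The kmalloc()/kfree() to kvmalloc()/kvfree() swap in the suspend and resume hunks above matters because the saved VCPU BO can run to several megabytes: kvmalloc() tries a regular kmalloc() first and falls back to vmalloc() when that much physically contiguous memory is unavailable, while kvfree() releases memory from either allocator. A self-contained sketch of the pattern (the helper name is hypothetical):

#include <linux/io.h>	/* memcpy_fromio() */
#include <linux/mm.h>	/* kvmalloc(), kvfree() */

/* Back up a large MMIO-mapped buffer without requiring physically
 * contiguous pages, mirroring the amdgpu_uvd_suspend() path above. */
static void *backup_vcpu_bo(const void __iomem *ptr, unsigned int size)
{
	void *saved = kvmalloc(size, GFP_KERNEL);

	if (!saved)
		return NULL;
	memcpy_fromio(saved, ptr, size);
	return saved;	/* caller releases with kvfree() */
}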
+1 −0
@@ -69,6 +69,7 @@ struct amdgpu_uvd {
 
 int amdgpu_uvd_sw_init(struct amdgpu_device *adev);
 int amdgpu_uvd_sw_fini(struct amdgpu_device *adev);
+int amdgpu_uvd_entity_init(struct amdgpu_device *adev);
 int amdgpu_uvd_suspend(struct amdgpu_device *adev);
 int amdgpu_uvd_resume(struct amdgpu_device *adev);
 int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
+23 −10
@@ -90,8 +90,6 @@ static void amdgpu_vce_idle_work_handler(struct work_struct *work);
  */
 int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
 {
-	struct amdgpu_ring *ring;
-	struct drm_sched_rq *rq;
 	const char *fw_name;
 	const struct common_firmware_header *hdr;
 	unsigned ucode_version, version_major, version_minor, binary_id;
@@ -188,14 +186,6 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
 		return r;
 	}
 
-	ring = &adev->vce.ring[0];
-	rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
-	r = drm_sched_entity_init(&adev->vce.entity, &rq, 1, NULL);
-	if (r != 0) {
-		DRM_ERROR("Failed setting up VCE run queue.\n");
-		return r;
-	}
-
 	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
 		atomic_set(&adev->vce.handles[i], 0);
 		adev->vce.filp[i] = NULL;
@@ -235,6 +225,29 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
 	return 0;
 }
 
+/**
+ * amdgpu_vce_entity_init - init entity
+ *
+ * @adev: amdgpu_device pointer
+ *
+ */
+int amdgpu_vce_entity_init(struct amdgpu_device *adev)
+{
+	struct amdgpu_ring *ring;
+	struct drm_sched_rq *rq;
+	int r;
+
+	ring = &adev->vce.ring[0];
+	rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
+	r = drm_sched_entity_init(&adev->vce.entity, &rq, 1, NULL);
+	if (r != 0) {
+		DRM_ERROR("Failed setting up VCE run queue.\n");
+		return r;
+	}
+
+	return 0;
+}
+
 /**
  * amdgpu_vce_suspend - unpin VCE fw memory
  *