
Commit bb7a9c8d authored by Dave Airlie

Merge branch 'drm-next-4.15' of git://people.freedesktop.org/~agd5f/linux into drm-next

More new stuff for 4.15. Highlights:
- Add clock query interface for raven
- Add new FENCE_TO_HANDLE ioctl
- UVD video encode ring support on polaris
- transparent huge page DMA support
- deadlock fixes
- compute pipe lru tweaks
- powerplay cleanups and regression fixes
- fix duplicate symbol issue with radeon and amdgpu
- misc bug fixes

* 'drm-next-4.15' of git://people.freedesktop.org/~agd5f/linux: (72 commits)
  drm/radeon/dp: make radeon_dp_get_dp_link_config static
  drm/radeon: move ci_send_msg_to_smc to where it's used
  drm/amd/sched: fix deadlock caused by unsignaled fences of deleted jobs
  drm/amd/sched: NULL out the s_fence field after run_job
  drm/amd/sched: move adding finish callback to amd_sched_job_begin
  drm/amd/sched: fix an outdated comment
  drm/amd/sched: rename amd_sched_entity_pop_job
  drm/amdgpu: minor coding style fix
  drm/ttm: add transparent huge page support for DMA allocations v2
  drm/ttm: add support for different pool sizes
  drm/ttm: remove unsued options from ttm_mem_global_alloc_page
  drm/amdgpu: add uvd enc irq
  drm/amdgpu: add uvd enc ib test
  drm/amdgpu: add uvd enc ring test
  drm/amdgpu: add uvd enc vm functions (v2)
  drm/amdgpu: add uvd enc into run queue
  drm/amdgpu: add uvd enc rings
  drm/amdgpu: add new uvd enc ring methods
  drm/amdgpu: add uvd enc command in header
  drm/amdgpu: add uvd enc registers in header
  ...
parents 15438ab0 d3f04c98
+3 −1
@@ -121,6 +121,7 @@ extern int amdgpu_cntl_sb_buf_per_se;
 extern int amdgpu_param_buf_per_se;
 extern int amdgpu_job_hang_limit;
 extern int amdgpu_lbpw;
+extern int amdgpu_compute_multipipe;
 
 #ifdef CONFIG_DRM_AMDGPU_SI
 extern int amdgpu_si_support;
@@ -1310,6 +1311,8 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
 			struct drm_file *filp);
 int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
+int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data,
+				    struct drm_file *filp);
 int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
 int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
 				struct drm_file *filp);
@@ -1524,7 +1527,6 @@ struct amdgpu_device {

 	/* powerplay */
 	struct amd_powerplay		powerplay;
-	bool				pp_enabled;
 	bool				pp_force_state_enabled;
 
 	/* dpm */
+9 −1
@@ -338,6 +338,7 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
 	struct cik_mqd *m;
 	uint32_t *mqd_hqd;
 	uint32_t reg, wptr_val, data;
+	bool valid_wptr = false;
 
 	m = get_mqd(mqd);
 
@@ -356,7 +357,14 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
 			     CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
 	WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, data);
 
-	if (read_user_wptr(mm, wptr, wptr_val))
+	/* read_user_ptr may take the mm->mmap_sem.
+	 * release srbm_mutex to avoid circular dependency between
+	 * srbm_mutex->mm_sem->reservation_ww_class_mutex->srbm_mutex.
+	 */
+	release_queue(kgd);
+	valid_wptr = read_user_wptr(mm, wptr, wptr_val);
+	acquire_queue(kgd, pipe_id, queue_id);
+	if (valid_wptr)
 		WREG32(mmCP_HQD_PQ_WPTR, (wptr_val << wptr_shift) & wptr_mask);
 
 	data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1);
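The new comment above records the deadlock reasoning: copying the user-space write pointer can fault and take mmap_sem, which elsewhere nests inside the SRBM lock, so the queue (and with it srbm_mutex) is dropped around the copy and re-acquired afterwards; the next file applies the same fix to the vi_mqd variant of kgd_hqd_load. A minimal sketch of that release-and-reacquire pattern, with hypothetical lock and helper names standing in for acquire_queue()/release_queue() and read_user_wptr():

#include <linux/mutex.h>
#include <linux/types.h>
#include <linux/uaccess.h>

/* Sketch only: example_hw_lock and example_read_wptr are illustrative
 * stand-ins, not symbols from this diff.
 */
static DEFINE_MUTEX(example_hw_lock);

static bool example_read_wptr(u32 __user *wptr, u32 *val)
{
	/* get_user() may fault and end up taking mmap_sem */
	return wptr && !get_user(*val, wptr);
}

static void example_hqd_load(u32 __user *wptr)
{
	u32 wptr_val = 0;
	bool valid_wptr;

	mutex_lock(&example_hw_lock);	/* program the queue registers ... */

	mutex_unlock(&example_hw_lock);	/* drop before touching user memory */
	valid_wptr = example_read_wptr(wptr, &wptr_val);
	mutex_lock(&example_hw_lock);	/* re-acquire and finish programming */

	if (valid_wptr) {
		/* write wptr_val to the hardware write pointer here */
	}

	mutex_unlock(&example_hw_lock);
}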
+9 −1
@@ -292,6 +292,7 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
 	struct vi_mqd *m;
 	uint32_t *mqd_hqd;
 	uint32_t reg, wptr_val, data;
+	bool valid_wptr = false;
 
 	m = get_mqd(mqd);
 
@@ -339,7 +340,14 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
 			     CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
 	WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, data);
 
-	if (read_user_wptr(mm, wptr, wptr_val))
+	/* read_user_ptr may take the mm->mmap_sem.
+	 * release srbm_mutex to avoid circular dependency between
+	 * srbm_mutex->mm_sem->reservation_ww_class_mutex->srbm_mutex.
+	 */
+	release_queue(kgd);
+	valid_wptr = read_user_wptr(mm, wptr, wptr_val);
+	acquire_queue(kgd, pipe_id, queue_id);
+	if (valid_wptr)
 		WREG32(mmCP_HQD_PQ_WPTR, (wptr_val << wptr_shift) & wptr_mask);
 
 	data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1);
+23 −0
@@ -42,6 +42,28 @@ struct amdgpu_cgs_device {
 	struct amdgpu_device *adev =					\
 		((struct amdgpu_cgs_device *)cgs_device)->adev
 
+static void *amdgpu_cgs_register_pp_handle(struct cgs_device *cgs_device,
+			int (*call_back_func)(struct amd_pp_init *, void **))
+{
+	CGS_FUNC_ADEV;
+	struct amd_pp_init pp_init;
+	struct amd_powerplay *amd_pp;
+
+	if (call_back_func == NULL)
+		return NULL;
+
+	amd_pp = &(adev->powerplay);
+	pp_init.chip_family = adev->family;
+	pp_init.chip_id = adev->asic_type;
+	pp_init.pm_en = (amdgpu_dpm != 0 && !amdgpu_sriov_vf(adev)) ? true : false;
+	pp_init.feature_mask = amdgpu_pp_feature_mask;
+	pp_init.device = cgs_device;
+	if (call_back_func(&pp_init, &(amd_pp->pp_handle)))
+		return NULL;
+
+	return adev->powerplay.pp_handle;
+}
+
 static int amdgpu_cgs_alloc_gpu_mem(struct cgs_device *cgs_device,
 				    enum cgs_gpu_mem_type type,
 				    uint64_t size, uint64_t align,
@@ -1179,6 +1201,7 @@ static const struct cgs_ops amdgpu_cgs_ops = {
 	.is_virtualization_enabled = amdgpu_cgs_is_virtualization_enabled,
 	.enter_safe_mode = amdgpu_cgs_enter_safe_mode,
 	.lock_grbm_idx = amdgpu_cgs_lock_grbm_idx,
+	.register_pp_handle = amdgpu_cgs_register_pp_handle,
 };
 
 static const struct cgs_os_ops amdgpu_cgs_os_ops = {
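The new amdgpu_cgs_register_pp_handle() op reverses the old linkage: the powerplay component hands an init callback in through CGS, amdgpu fills a struct amd_pp_init (chip family, chip id, pm_en, feature mask, the cgs device) and keeps whatever opaque handle the callback returns as adev->powerplay.pp_handle. A rough sketch of a callback matching the int (*)(struct amd_pp_init *, void **) signature used above; apart from that signature and the amd_pp_init fields, every name below, including the header path, is an assumption for illustration:

#include <linux/slab.h>
#include <linux/types.h>

#include "amd_powerplay.h"	/* struct amd_pp_init; header path assumed */

/* Hypothetical per-device state, not a structure from this diff. */
struct example_pp_state {
	u32 chip_family;
	u32 chip_id;
	bool pm_en;
	struct cgs_device *cgs_device;
};

/* Would be passed as call_back_func to amdgpu_cgs_register_pp_handle(). */
static int example_pp_create(struct amd_pp_init *pp_init, void **handle)
{
	struct example_pp_state *st = kzalloc(sizeof(*st), GFP_KERNEL);

	if (!st)
		return -ENOMEM;

	/* amdgpu_cgs_register_pp_handle() fills pp_init before calling us. */
	st->chip_family = pp_init->chip_family;
	st->chip_id = pp_init->chip_id;
	st->pm_en = pp_init->pm_en;
	st->cgs_device = pp_init->device;

	*handle = st;	/* stored by amdgpu as adev->powerplay.pp_handle */
	return 0;
}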
+61 −0
@@ -25,6 +25,7 @@
  *    Jerome Glisse <glisse@freedesktop.org>
  */
 #include <linux/pagemap.h>
+#include <linux/sync_file.h>
 #include <drm/drmP.h>
 #include <drm/amdgpu_drm.h>
 #include <drm/drm_syncobj.h>
@@ -1330,6 +1331,66 @@ static struct dma_fence *amdgpu_cs_get_fence(struct amdgpu_device *adev,
 	return fence;
 }
 
+int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data,
+				    struct drm_file *filp)
+{
+	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_fpriv *fpriv = filp->driver_priv;
+	union drm_amdgpu_fence_to_handle *info = data;
+	struct dma_fence *fence;
+	struct drm_syncobj *syncobj;
+	struct sync_file *sync_file;
+	int fd, r;
+
+	if (amdgpu_kms_vram_lost(adev, fpriv))
+		return -ENODEV;
+
+	fence = amdgpu_cs_get_fence(adev, filp, &info->in.fence);
+	if (IS_ERR(fence))
+		return PTR_ERR(fence);
+
+	switch (info->in.what) {
+	case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ:
+		r = drm_syncobj_create(&syncobj, 0, fence);
+		dma_fence_put(fence);
+		if (r)
+			return r;
+		r = drm_syncobj_get_handle(filp, syncobj, &info->out.handle);
+		drm_syncobj_put(syncobj);
+		return r;
+
+	case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ_FD:
+		r = drm_syncobj_create(&syncobj, 0, fence);
+		dma_fence_put(fence);
+		if (r)
+			return r;
+		r = drm_syncobj_get_fd(syncobj, (int*)&info->out.handle);
+		drm_syncobj_put(syncobj);
+		return r;
+
+	case AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD:
+		fd = get_unused_fd_flags(O_CLOEXEC);
+		if (fd < 0) {
+			dma_fence_put(fence);
+			return fd;
+		}
+
+		sync_file = sync_file_create(fence);
+		dma_fence_put(fence);
+		if (!sync_file) {
+			put_unused_fd(fd);
+			return -ENOMEM;
+		}
+
+		fd_install(fd, sync_file->file);
+		info->out.handle = fd;
+		return 0;
+
+	default:
+		return -EINVAL;
+	}
+}
+
 /**
  * amdgpu_cs_wait_all_fence - wait on all fences to signal
  *
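The ioctl above gives userspace three ways to export the fence of a submitted command stream: a syncobj handle, a syncobj fd, or a sync_file fd. A hedged userspace sketch of the sync_file case, assuming the matching uapi pieces (DRM_IOCTL_AMDGPU_FENCE_TO_HANDLE, union drm_amdgpu_fence_to_handle and the AMDGPU_FENCE_TO_HANDLE_* values) are exposed through amdgpu_drm.h by this series; the context, ring and sequence values are placeholders the caller would take from its own CS submission:

#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <amdgpu_drm.h>

/* Returns a pollable sync_file fd for a previously submitted CS, or -errno. */
static int example_fence_to_sync_file(int drm_fd, uint32_t ctx_id,
				      uint32_t ip_type, uint32_t ring,
				      uint64_t seq_no)
{
	union drm_amdgpu_fence_to_handle args;

	memset(&args, 0, sizeof(args));
	args.in.fence.ctx_id = ctx_id;		/* context the CS ran on */
	args.in.fence.ip_type = ip_type;	/* e.g. AMDGPU_HW_IP_GFX */
	args.in.fence.ring = ring;
	args.in.fence.seq_no = seq_no;		/* sequence returned by the CS ioctl */
	args.in.what = AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD;

	if (drmIoctl(drm_fd, DRM_IOCTL_AMDGPU_FENCE_TO_HANDLE, &args))
		return -errno;

	return (int)args.out.handle;		/* close() when no longer needed */
}

Because the result is an ordinary sync_file fd, it can be polled or passed to any other process or driver that accepts sync_file fences.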