
Commit 95d2c3e1 authored by Dave Airlie

Merge branch 'drm-next-4.18' of git://people.freedesktop.org/~agd5f/linux into drm-next

Main changes for 4.18.  I'd like to do a separate pull for vega20 later
this week or next.  Highlights:
- Reserve pre-OS scanout buffer during init for seamless transition from
  console to driver
- VEGAM support
- Improved GPU scheduler documentation
- Initial gfxoff support for raven
- SR-IOV fixes
- Default to non-AGP on PowerPC for radeon
- Fine grained clock voltage control for vega10
- Power profiles for vega10
- Further clean up of powerplay/driver interface
- Underlay fixes
- Display link bw updates
- Gamma fixes
- Scatter/Gather display support on CZ/ST
- Misc bug fixes and clean ups

[airlied: fixup v3d vs scheduler API change]

Link: https://patchwork.freedesktop.org/patch/msgid/20180515185450.1113-1-alexander.deucher@amd.com


Signed-off-by: Dave Airlie <airlied@redhat.com>
Parents: b8a71080 8344c53f
+4 −0
@@ -64,6 +64,10 @@ amdgpu-$(CONFIG_DRM_AMDGPU_SI)+= si.o gmc_v6_0.o gfx_v6_0.o si_ih.o si_dma.o dce
amdgpu-y += \
	vi.o mxgpu_vi.o nbio_v6_1.o soc15.o emu_soc.o mxgpu_ai.o nbio_v7_0.o vega10_reg_init.o

# add DF block
amdgpu-y += \
	df_v1_7.o

# add GMC block
amdgpu-y += \
	gmc_v7_0.o \
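
The new df_v1_7.o object backs the struct amdgpu_df_funcs callback table introduced in the header diff below. As a hedged sketch only (the df_v1_7_funcs symbol name is assumed here, not taken from this patch), an ASIC setup path would pick the DF callbacks roughly like this:

/* sketch: select the Data Fabric (DF) callback table during early ASIC init */
static int example_soc_early_init(struct amdgpu_device *adev)
{
	adev->df_funcs = &df_v1_7_funcs;	/* assumed to be exported by df_v1_7.o */
	return 0;
}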
+40 −3
@@ -129,6 +129,7 @@ extern int amdgpu_lbpw;
extern int amdgpu_compute_multipipe;
extern int amdgpu_gpu_recovery;
extern int amdgpu_emu_mode;
extern uint amdgpu_smu_memory_pool_size;

#ifdef CONFIG_DRM_AMDGPU_SI
extern int amdgpu_si_support;
@@ -137,6 +138,7 @@ extern int amdgpu_si_support;
extern int amdgpu_cik_support;
#endif

#define AMDGPU_SG_THRESHOLD			(256*1024*1024)
#define AMDGPU_DEFAULT_GTT_SIZE_MB		3072ULL /* 3GB by default */
#define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS	        3000
#define AMDGPU_MAX_USEC_TIMEOUT			100000	/* 100 ms */
@@ -222,10 +224,10 @@ enum amdgpu_kiq_irq {
	AMDGPU_CP_KIQ_IRQ_LAST
};

int amdgpu_device_ip_set_clockgating_state(struct amdgpu_device *adev,
int amdgpu_device_ip_set_clockgating_state(void *dev,
					   enum amd_ip_block_type block_type,
					   enum amd_clockgating_state state);
int amdgpu_device_ip_set_powergating_state(struct amdgpu_device *adev,
int amdgpu_device_ip_set_powergating_state(void *dev,
					   enum amd_ip_block_type block_type,
					   enum amd_powergating_state state);
void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
@@ -681,6 +683,8 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx, unsigned ring_id);

void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr);
void amdgpu_ctx_mgr_entity_cleanup(struct amdgpu_ctx_mgr *mgr);
void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr);
void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr);


@@ -771,9 +775,18 @@ struct amdgpu_rlc {
	u32 starting_offsets_start;
	u32 reg_list_format_size_bytes;
	u32 reg_list_size_bytes;
	u32 reg_list_format_direct_reg_list_length;
	u32 save_restore_list_cntl_size_bytes;
	u32 save_restore_list_gpm_size_bytes;
	u32 save_restore_list_srm_size_bytes;

	u32 *register_list_format;
	u32 *register_restore;
	u8 *save_restore_list_cntl;
	u8 *save_restore_list_gpm;
	u8 *save_restore_list_srm;

	bool is_rlc_v2_1;
};

#define AMDGPU_MAX_COMPUTE_QUEUES KGD_MAX_QUEUES
@@ -867,6 +880,8 @@ struct amdgpu_gfx_config {

	/* gfx configure feature */
	uint32_t double_offchip_lds_buf;
	/* cached value of DB_DEBUG2 */
	uint32_t db_debug2;
};

struct amdgpu_cu_info {
@@ -938,6 +953,12 @@ struct amdgpu_gfx {
	uint32_t			ce_feature_version;
	uint32_t			pfp_feature_version;
	uint32_t			rlc_feature_version;
	uint32_t			rlc_srlc_fw_version;
	uint32_t			rlc_srlc_feature_version;
	uint32_t			rlc_srlg_fw_version;
	uint32_t			rlc_srlg_feature_version;
	uint32_t			rlc_srls_fw_version;
	uint32_t			rlc_srls_feature_version;
	uint32_t			mec_feature_version;
	uint32_t			mec2_feature_version;
	struct amdgpu_ring		gfx_ring[AMDGPU_MAX_GFX_RINGS];
@@ -1204,6 +1225,8 @@ struct amdgpu_asic_funcs {
	/* invalidate hdp read cache */
	void (*invalidate_hdp)(struct amdgpu_device *adev,
			       struct amdgpu_ring *ring);
	/* check if the asic needs a full reset of if soft reset will work */
	bool (*need_full_reset)(struct amdgpu_device *adev);
};

/*
@@ -1368,7 +1391,17 @@ struct amdgpu_nbio_funcs {
	void (*detect_hw_virt)(struct amdgpu_device *adev);
};


struct amdgpu_df_funcs {
	void (*init)(struct amdgpu_device *adev);
	void (*enable_broadcast_mode)(struct amdgpu_device *adev,
				      bool enable);
	u32 (*get_fb_channel_number)(struct amdgpu_device *adev);
	u32 (*get_hbm_channel_number)(struct amdgpu_device *adev);
	void (*update_medium_grain_clock_gating)(struct amdgpu_device *adev,
						 bool enable);
	void (*get_clockgating_state)(struct amdgpu_device *adev,
				      u32 *flags);
};
/* Define the HW IP blocks will be used in driver , add more if necessary */
enum amd_hw_ip_block_type {
	GC_HWIP = 1,
@@ -1398,6 +1431,7 @@ enum amd_hw_ip_block_type {
struct amd_powerplay {
	void *pp_handle;
	const struct amd_pm_funcs *pp_funcs;
	uint32_t pp_feature;
};

#define AMDGPU_RESET_MAGIC_NUM 64
@@ -1590,6 +1624,7 @@ struct amdgpu_device {
	uint32_t 		*reg_offset[MAX_HWIP][HWIP_MAX_INSTANCE];

	const struct amdgpu_nbio_funcs	*nbio_funcs;
	const struct amdgpu_df_funcs	*df_funcs;

	/* delayed work_func for deferring clockgating during resume */
	struct delayed_work     late_init_work;
@@ -1764,6 +1799,7 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
#define amdgpu_asic_get_config_memsize(adev) (adev)->asic_funcs->get_config_memsize((adev))
#define amdgpu_asic_flush_hdp(adev, r) (adev)->asic_funcs->flush_hdp((adev), (r))
#define amdgpu_asic_invalidate_hdp(adev, r) (adev)->asic_funcs->invalidate_hdp((adev), (r))
#define amdgpu_asic_need_full_reset(adev) (adev)->asic_funcs->need_full_reset((adev))
#define amdgpu_gmc_flush_gpu_tlb(adev, vmid) (adev)->gmc.gmc_funcs->flush_gpu_tlb((adev), (vmid))
#define amdgpu_gmc_emit_flush_gpu_tlb(r, vmid, addr) (r)->adev->gmc.gmc_funcs->emit_flush_gpu_tlb((r), (vmid), (addr))
#define amdgpu_gmc_emit_pasid_mapping(r, vmid, pasid) (r)->adev->gmc.gmc_funcs->emit_pasid_mapping((r), (vmid), (pasid))
@@ -1790,6 +1826,7 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
#define amdgpu_ring_emit_rreg(r, d) (r)->funcs->emit_rreg((r), (d))
#define amdgpu_ring_emit_wreg(r, d, v) (r)->funcs->emit_wreg((r), (d), (v))
#define amdgpu_ring_emit_reg_wait(r, d, v, m) (r)->funcs->emit_reg_wait((r), (d), (v), (m))
#define amdgpu_ring_emit_reg_write_reg_wait(r, d0, d1, v, m) (r)->funcs->emit_reg_write_reg_wait((r), (d0), (d1), (v), (m))
#define amdgpu_ring_emit_tmz(r, b) (r)->funcs->emit_tmz((r), (b))
#define amdgpu_ring_pad_ib(r, ib) ((r)->funcs->pad_ib((r), (ib)))
#define amdgpu_ring_init_cond_exec(r) (r)->funcs->init_cond_exec((r))
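
Among the header additions above are the per-ASIC need_full_reset callback and the amdgpu_asic_need_full_reset() wrapper macro. A minimal sketch, with illustrative names only, of how an ASIC file could implement the callback and how a recovery path might consult it:

/* example implementation: report whether a soft reset is expected to be insufficient */
static bool example_asic_need_full_reset(struct amdgpu_device *adev)
{
	/* a real implementation would check which IP blocks are actually hung */
	return true;
}

static const struct amdgpu_asic_funcs example_asic_funcs = {
	/* ...other callbacks elided... */
	.need_full_reset = example_asic_need_full_reset,
};

/* example caller: decide between soft reset and full ASIC reset */
static void example_gpu_recover(struct amdgpu_device *adev)
{
	if (amdgpu_asic_need_full_reset(adev))
		dev_info(adev->dev, "soft reset not sufficient, doing full reset\n");
}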
+5 −6
@@ -290,12 +290,11 @@ static int acp_hw_init(void *handle)
	else if (r)
		return r;

	r = cgs_get_pci_resource(adev->acp.cgs_device, CGS_RESOURCE_TYPE_MMIO,
			0x5289, 0, &acp_base);
	if (r == -ENODEV)
		return 0;
	else if (r)
		return r;
	if (adev->rmmio_size == 0 || adev->rmmio_size < 0x5289)
		return -EINVAL;

	acp_base = adev->rmmio_base;

	if (adev->asic_type != CHIP_STONEY) {
		adev->acp.acp_genpd = kzalloc(sizeof(struct acp_pm_domain), GFP_KERNEL);
		if (adev->acp.acp_genpd == NULL)
+9 −3
@@ -243,13 +243,19 @@ int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
	struct amdgpu_bo *bo = NULL;
	struct amdgpu_bo_param bp;
	int r;
	uint64_t gpu_addr_tmp = 0;
	void *cpu_ptr_tmp = NULL;

	r = amdgpu_bo_create(adev, size, PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
			     AMDGPU_GEM_CREATE_CPU_GTT_USWC, ttm_bo_type_kernel,
			     NULL, &bo);
	memset(&bp, 0, sizeof(bp));
	bp.size = size;
	bp.byte_align = PAGE_SIZE;
	bp.domain = AMDGPU_GEM_DOMAIN_GTT;
	bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
	bp.type = ttm_bo_type_kernel;
	bp.resv = NULL;
	r = amdgpu_bo_create(adev, &bp, &bo);
	if (r) {
		dev_err(adev->dev,
			"failed to allocate BO for amdkfd (%d)\n", r);
+9 −2
@@ -1143,6 +1143,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
	struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
	uint64_t user_addr = 0;
	struct amdgpu_bo *bo;
	struct amdgpu_bo_param bp;
	int byte_align;
	u32 domain, alloc_domain;
	u64 alloc_flags;
@@ -1215,8 +1216,14 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
	pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s\n",
			va, size, domain_string(alloc_domain));

	ret = amdgpu_bo_create(adev, size, byte_align,
				alloc_domain, alloc_flags, ttm_bo_type_device, NULL, &bo);
	memset(&bp, 0, sizeof(bp));
	bp.size = size;
	bp.byte_align = byte_align;
	bp.domain = alloc_domain;
	bp.flags = alloc_flags;
	bp.type = ttm_bo_type_device;
	bp.resv = NULL;
	ret = amdgpu_bo_create(adev, &bp, &bo);
	if (ret) {
		pr_debug("Failed to create BO on domain %s. ret %d\n",
				domain_string(alloc_domain), ret);
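
Both BO-allocation hunks above follow the same conversion: amdgpu_bo_create() now takes a single struct amdgpu_bo_param rather than a long positional argument list. A minimal sketch of the new calling convention for any other in-driver caller (size and placement here are arbitrary, for illustration only):

/* sketch: allocate a page-sized kernel BO in GTT using the parameter struct */
static int example_alloc_bo(struct amdgpu_device *adev, struct amdgpu_bo **bo)
{
	struct amdgpu_bo_param bp;

	memset(&bp, 0, sizeof(bp));	/* fields not set explicitly stay zero/NULL */
	bp.size = PAGE_SIZE;
	bp.byte_align = PAGE_SIZE;
	bp.domain = AMDGPU_GEM_DOMAIN_GTT;
	bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
	bp.type = ttm_bo_type_kernel;
	bp.resv = NULL;

	return amdgpu_bo_create(adev, &bp, bo);
}

Packing the arguments into one struct keeps call sites readable and lets new options be added later without touching every caller.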