Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit b1166325 authored by Christian König, committed by Alex Deucher
Browse files

drm/amdgpu: cleanup adjust_mc_addr handling v4



Rename adjust_mc_addr to get_vm_pde and check the address bits in one place.

v2: handle vcn as well, keep setting the valid bit manually,
    add a BUG_ON() for GMC v6, v7 and v8 as well.
v3: handle vcn_v1_0_enc_ring_emit_vm_flush as well.
v4: fix the BUG_ON mask for GFX6-8

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent e8835e0e
Loading
Loading
Loading
Loading
+3 −2
Original line number Diff line number Diff line
@@ -308,8 +308,8 @@ struct amdgpu_gart_funcs {
	/* set pte flags based per asic */
	uint64_t (*get_vm_pte_flags)(struct amdgpu_device *adev,
				     uint32_t flags);
	/* adjust mc addr in fb for APU case */
	u64 (*adjust_mc_addr)(struct amdgpu_device *adev, u64 addr);
	/* get the pde for a given mc addr */
	u64 (*get_vm_pde)(struct amdgpu_device *adev, u64 addr);
	uint32_t (*get_invalidate_req)(unsigned int vm_id);
};

@@ -1813,6 +1813,7 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
#define amdgpu_asic_get_config_memsize(adev) (adev)->asic_funcs->get_config_memsize((adev))
#define amdgpu_gart_flush_gpu_tlb(adev, vmid) (adev)->gart.gart_funcs->flush_gpu_tlb((adev), (vmid))
#define amdgpu_gart_set_pte_pde(adev, pt, idx, addr, flags) (adev)->gart.gart_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags))
#define amdgpu_gart_get_vm_pde(adev, addr) (adev)->gart.gart_funcs->get_vm_pde((adev), (addr))
#define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count)))
#define amdgpu_vm_write_pte(adev, ib, pe, value, count, incr) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pe), (value), (count), (incr)))
#define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags)))
+9 −17
Original line number Diff line number Diff line
@@ -682,16 +682,6 @@ static bool amdgpu_vm_ring_has_compute_vm_bug(struct amdgpu_ring *ring)
	return false;
}

/*
 * amdgpu_vm_adjust_mc_addr - translate an MC address via the ASIC hook
 * @adev: amdgpu device pointer
 * @mc_addr: memory-controller address to translate
 *
 * Returns the address produced by the optional gart_funcs->adjust_mc_addr
 * callback, or the input unchanged when no callback is installed.
 */
static u64 amdgpu_vm_adjust_mc_addr(struct amdgpu_device *adev, u64 mc_addr)
{
	/* Only APU-style ASICs install this hook; others pass through. */
	if (adev->gart.gart_funcs->adjust_mc_addr)
		return adev->gart.gart_funcs->adjust_mc_addr(adev, mc_addr);

	return mc_addr;
}

bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
				  struct amdgpu_job *job)
{
@@ -1033,18 +1023,18 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev,
		    (count == AMDGPU_VM_MAX_UPDATE_SIZE)) {

			if (count) {
				uint64_t pt_addr =
					amdgpu_vm_adjust_mc_addr(adev, last_pt);
				uint64_t entry;

				entry = amdgpu_gart_get_vm_pde(adev, last_pt);
				if (shadow)
					amdgpu_vm_do_set_ptes(&params,
							      last_shadow,
							      pt_addr, count,
							      entry, count,
							      incr,
							      AMDGPU_PTE_VALID);

				amdgpu_vm_do_set_ptes(&params, last_pde,
						      pt_addr, count, incr,
						      entry, count, incr,
						      AMDGPU_PTE_VALID);
			}

@@ -1058,13 +1048,15 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev,
	}

	if (count) {
		uint64_t pt_addr = amdgpu_vm_adjust_mc_addr(adev, last_pt);
		uint64_t entry;

		entry = amdgpu_gart_get_vm_pde(adev, last_pt);

		if (vm->root.bo->shadow)
			amdgpu_vm_do_set_ptes(&params, last_shadow, pt_addr,
			amdgpu_vm_do_set_ptes(&params, last_shadow, entry,
					      count, incr, AMDGPU_PTE_VALID);

		amdgpu_vm_do_set_ptes(&params, last_pde, pt_addr,
		amdgpu_vm_do_set_ptes(&params, last_pde, entry,
				      count, incr, AMDGPU_PTE_VALID);
	}

+2 −4
Original line number Diff line number Diff line
@@ -3832,10 +3832,8 @@ static void gfx_v9_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
	uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
	unsigned eng = ring->vm_inv_eng;

	pd_addr = ring->adev->gart.gart_funcs->adjust_mc_addr(ring->adev, pd_addr);
	pd_addr = pd_addr | 0x1; /* valid bit */
	/* now only use physical base address of PDE and valid */
	BUG_ON(pd_addr & 0xFFFF00000000003EULL);
	pd_addr = amdgpu_gart_get_vm_pde(ring->adev, pd_addr);
	pd_addr |= AMDGPU_PTE_VALID;

	gfx_v9_0_write_data_to_reg(ring, usepfp, true,
				   hub->ctx0_ptb_addr_lo32 + (2 * vm_id),
+7 −0
Original line number Diff line number Diff line
@@ -395,6 +395,12 @@ static uint64_t gmc_v6_0_get_vm_pte_flags(struct amdgpu_device *adev,
	return pte_flag;
}

/*
 * gmc_v6_0_get_vm_pde - build a page-directory entry from an MC address
 * @adev: amdgpu device pointer (unused on GMC v6)
 * @addr: page-table base address to encode
 *
 * GMC v6 uses the address unchanged as the PDE; the BUG_ON enforces that
 * the address is 4K-aligned and fits within the bits the hardware decodes.
 */
static uint64_t gmc_v6_0_get_vm_pde(struct amdgpu_device *adev, uint64_t addr)
{
	/* Bits that must be clear: low 12 (alignment) and high 24 (range). */
	const uint64_t invalid_bits = 0xFFFFFF0000000FFFULL;

	BUG_ON(addr & invalid_bits);

	return addr;
}

static void gmc_v6_0_set_fault_enable_default(struct amdgpu_device *adev,
					      bool value)
{
@@ -1121,6 +1127,7 @@ static const struct amdgpu_gart_funcs gmc_v6_0_gart_funcs = {
	.flush_gpu_tlb = gmc_v6_0_gart_flush_gpu_tlb,
	.set_pte_pde = gmc_v6_0_gart_set_pte_pde,
	.set_prt = gmc_v6_0_set_prt,
	.get_vm_pde = gmc_v6_0_get_vm_pde,
	.get_vm_pte_flags = gmc_v6_0_get_vm_pte_flags
};

+8 −1
Original line number Diff line number Diff line
@@ -472,6 +472,12 @@ static uint64_t gmc_v7_0_get_vm_pte_flags(struct amdgpu_device *adev,
	return pte_flag;
}

/*
 * gmc_v7_0_get_vm_pde - build a page-directory entry from an MC address
 * @adev: amdgpu device pointer (unused on GMC v7)
 * @addr: page-table base address to encode
 *
 * GMC v7 uses the address unchanged as the PDE; the BUG_ON enforces that
 * the address is 4K-aligned and fits within the bits the hardware decodes.
 */
static uint64_t gmc_v7_0_get_vm_pde(struct amdgpu_device *adev, uint64_t addr)
{
	/* Bits that must be clear: low 12 (alignment) and high 24 (range). */
	const uint64_t invalid_bits = 0xFFFFFF0000000FFFULL;

	BUG_ON(addr & invalid_bits);

	return addr;
}

/**
 * gmc_v8_0_set_fault_enable_default - update VM fault handling
 *
@@ -1293,7 +1299,8 @@ static const struct amdgpu_gart_funcs gmc_v7_0_gart_funcs = {
	.flush_gpu_tlb = gmc_v7_0_gart_flush_gpu_tlb,
	.set_pte_pde = gmc_v7_0_gart_set_pte_pde,
	.set_prt = gmc_v7_0_set_prt,
	.get_vm_pte_flags = gmc_v7_0_get_vm_pte_flags
	.get_vm_pte_flags = gmc_v7_0_get_vm_pte_flags,
	.get_vm_pde = gmc_v7_0_get_vm_pde
};

static const struct amdgpu_irq_src_funcs gmc_v7_0_irq_funcs = {
Loading