Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit b07c9d2a authored by Christian König; committed by Alex Deucher
Browse files

drm/amdgpu: move more logic into amdgpu_vm_map_gart v3



No need to duplicate that code over and over again. Also stop using the
flags to determine if we need to map the addresses.

v2: constify the pages_addr
v3: rebased, fix typo in commit message

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 599f4348
Loading
Loading
Loading
Loading
+3 −3
Original line number Diff line number Diff line
@@ -283,7 +283,7 @@ struct amdgpu_vm_pte_funcs {
			 unsigned count);
	/* write pte one entry at a time with addr mapping */
	void (*write_pte)(struct amdgpu_ib *ib,
			  uint64_t pe,
			  const dma_addr_t *pages_addr, uint64_t pe,
			  uint64_t addr, unsigned count,
			  uint32_t incr, uint32_t flags);
	/* for linear pte/pde updates without addr mapping */
@@ -962,7 +962,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
void amdgpu_vm_flush(struct amdgpu_ring *ring,
		     struct amdgpu_vm *vm,
		     struct fence *updates);
uint64_t amdgpu_vm_map_gart(struct amdgpu_device *adev, uint64_t addr);
uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr);
int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
				    struct amdgpu_vm *vm);
int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
@@ -2198,7 +2198,7 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
#define amdgpu_gart_flush_gpu_tlb(adev, vmid) (adev)->gart.gart_funcs->flush_gpu_tlb((adev), (vmid))
#define amdgpu_gart_set_pte_pde(adev, pt, idx, addr, flags) (adev)->gart.gart_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags))
#define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count)))
#define amdgpu_vm_write_pte(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pe), (addr), (count), (incr), (flags)))
#define amdgpu_vm_write_pte(adev, ib, pa, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pa), (pe), (addr), (count), (incr), (flags)))
#define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags)))
#define amdgpu_vm_pad_ib(adev, ib) ((adev)->vm_manager.vm_pte_funcs->pad_ib((ib)))
#define amdgpu_ring_parse_cs(r, p, ib) ((r)->funcs->parse_cs((p), (ib)))
+24 −12
Original line number Diff line number Diff line
@@ -299,8 +299,13 @@ static void amdgpu_vm_update_pages(struct amdgpu_device *adev,
		uint64_t src = adev->gart.table_addr + (addr >> 12) * 8;
		amdgpu_vm_copy_pte(adev, ib, pe, src, count);

	} else if ((flags & AMDGPU_PTE_SYSTEM) || (count < 3)) {
		amdgpu_vm_write_pte(adev, ib, pe, addr,
	} else if (flags & AMDGPU_PTE_SYSTEM) {
		dma_addr_t *pages_addr = adev->gart.pages_addr;
		amdgpu_vm_write_pte(adev, ib, pages_addr, pe, addr,
				    count, incr, flags);

	} else if (count < 3) {
		amdgpu_vm_write_pte(adev, ib, NULL, pe, addr,
				    count, incr, flags);

	} else {
@@ -378,25 +383,32 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
}

/**
 * amdgpu_vm_map_gart - get the physical address of a gart page
 * amdgpu_vm_map_gart - Resolve gart mapping of addr
 *
 * @adev: amdgpu_device pointer
 * @pages_addr: optional DMA address to use for lookup
 * @addr: the unmapped addr
 *
 * Look up the physical address of the page that the pte resolves
 * to (cayman+).
 * Returns the physical address of the page.
 * to and return the pointer for the page table entry.
 */
uint64_t amdgpu_vm_map_gart(struct amdgpu_device *adev, uint64_t addr)
uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
{
	uint64_t result;

	if (pages_addr) {
		/* page table offset */
	result = adev->gart.pages_addr[addr >> PAGE_SHIFT];
		result = pages_addr[addr >> PAGE_SHIFT];

		/* in case cpu page size != gpu page size*/
		result |= addr & (~PAGE_MASK);

	} else {
		/* No mapping required */
		result = addr;
	}

	result &= 0xFFFFFFFFFFFFF000ULL;

	return result;
}

+2 −9
Original line number Diff line number Diff line
@@ -714,7 +714,7 @@ static void cik_sdma_vm_copy_pte(struct amdgpu_ib *ib,
 * Update PTEs by writing them manually using sDMA (CIK).
 */
static void cik_sdma_vm_write_pte(struct amdgpu_ib *ib,
				  uint64_t pe,
				  const dma_addr_t *pages_addr, uint64_t pe,
				  uint64_t addr, unsigned count,
				  uint32_t incr, uint32_t flags)
{
@@ -733,14 +733,7 @@ static void cik_sdma_vm_write_pte(struct amdgpu_ib *ib,
		ib->ptr[ib->length_dw++] = upper_32_bits(pe);
		ib->ptr[ib->length_dw++] = ndw;
		for (; ndw > 0; ndw -= 2, --count, pe += 8) {
			if (flags & AMDGPU_PTE_SYSTEM) {
				value = amdgpu_vm_map_gart(ib->ring->adev, addr);
				value &= 0xFFFFFFFFFFFFF000ULL;
			} else if (flags & AMDGPU_PTE_VALID) {
				value = addr;
			} else {
				value = 0;
			}
			value = amdgpu_vm_map_gart(pages_addr, addr);
			addr += incr;
			value |= flags;
			ib->ptr[ib->length_dw++] = value;
+2 −9
Original line number Diff line number Diff line
@@ -772,7 +772,7 @@ static void sdma_v2_4_vm_copy_pte(struct amdgpu_ib *ib,
 * Update PTEs by writing them manually using sDMA (CIK).
 */
static void sdma_v2_4_vm_write_pte(struct amdgpu_ib *ib,
				   uint64_t pe,
				   const dma_addr_t *pages_addr, uint64_t pe,
				   uint64_t addr, unsigned count,
				   uint32_t incr, uint32_t flags)
{
@@ -791,14 +791,7 @@ static void sdma_v2_4_vm_write_pte(struct amdgpu_ib *ib,
		ib->ptr[ib->length_dw++] = upper_32_bits(pe);
		ib->ptr[ib->length_dw++] = ndw;
		for (; ndw > 0; ndw -= 2, --count, pe += 8) {
			if (flags & AMDGPU_PTE_SYSTEM) {
				value = amdgpu_vm_map_gart(ib->ring->adev, addr);
				value &= 0xFFFFFFFFFFFFF000ULL;
			} else if (flags & AMDGPU_PTE_VALID) {
				value = addr;
			} else {
				value = 0;
			}
			value = amdgpu_vm_map_gart(pages_addr, addr);
			addr += incr;
			value |= flags;
			ib->ptr[ib->length_dw++] = value;
+2 −9
Original line number Diff line number Diff line
@@ -922,7 +922,7 @@ static void sdma_v3_0_vm_copy_pte(struct amdgpu_ib *ib,
 * Update PTEs by writing them manually using sDMA (CIK).
 */
static void sdma_v3_0_vm_write_pte(struct amdgpu_ib *ib,
				   uint64_t pe,
				   const dma_addr_t *pages_addr, uint64_t pe,
				   uint64_t addr, unsigned count,
				   uint32_t incr, uint32_t flags)
{
@@ -941,14 +941,7 @@ static void sdma_v3_0_vm_write_pte(struct amdgpu_ib *ib,
		ib->ptr[ib->length_dw++] = upper_32_bits(pe);
		ib->ptr[ib->length_dw++] = ndw;
		for (; ndw > 0; ndw -= 2, --count, pe += 8) {
			if (flags & AMDGPU_PTE_SYSTEM) {
				value = amdgpu_vm_map_gart(ib->ring->adev, addr);
				value &= 0xFFFFFFFFFFFFF000ULL;
			} else if (flags & AMDGPU_PTE_VALID) {
				value = addr;
			} else {
				value = 0;
			}
			value = amdgpu_vm_map_gart(pages_addr, addr);
			addr += incr;
			value |= flags;
			ib->ptr[ib->length_dw++] = value;