
Commit b1e5f172 authored by Jerome Glisse, committed by Dave Airlie

drm/ttm: introduce callback for ttm_tt populate & unpopulate V4



Move the page allocation and freeing to driver callbacks and
provide TTM helper functions for those.

The most intrusive change is that we now only ever populate an
object in full; this simplifies some of the code designed around
the page fault path.

V2 Rebase on top of memory accounting overhaul
V3 New rebase on top of more memory accounting changes
V4 Rebase on top of no memory accounting changes (where/when is my
   DeLorean when I need it?)

Signed-off-by: Jerome Glisse <jglisse@redhat.com>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Reviewed-by: Thomas Hellstrom <thellstrom@vmware.com>
parent 649bf3ca
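For orientation, a minimal sketch of the two hooks this patch adds to struct ttm_bo_driver, as implied by the call sites (ttm->bdev->driver->ttm_tt_populate(ttm)) and by the generic helpers in the hunks below; the header hunk itself is not part of this excerpt, so the exact layout shown here is an assumption.

struct ttm_tt;

/* Sketch only: the new driver hooks implied by this patch.  A driver
 * either implements them itself or, like nouveau and radeon below,
 * points them at the generic ttm_pool_populate()/ttm_pool_unpopulate()
 * helpers. */
struct ttm_bo_driver {
	/* ... existing callbacks such as ttm_tt_create ... */

	/* Allocate and account all backing pages of a ttm_tt. */
	int (*ttm_tt_populate)(struct ttm_tt *ttm);

	/* Release the backing pages allocated by ttm_tt_populate. */
	void (*ttm_tt_unpopulate)(struct ttm_tt *ttm);
};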
+3 −0
@@ -28,6 +28,7 @@
 */

#include "drmP.h"
#include "ttm/ttm_page_alloc.h"

#include "nouveau_drm.h"
#include "nouveau_drv.h"
@@ -1050,6 +1051,8 @@ nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence)

struct ttm_bo_driver nouveau_bo_driver = {
	.ttm_tt_create = &nouveau_ttm_tt_create,
	.ttm_tt_populate = &ttm_pool_populate,
	.ttm_tt_unpopulate = &ttm_pool_unpopulate,
	.invalidate_caches = nouveau_bo_invalidate_caches,
	.init_mem_type = nouveau_bo_init_mem_type,
	.evict_flags = nouveau_bo_evict_flags,
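Drivers without special page-allocation needs simply point the new hooks at the generic pool helpers, as nouveau does here and radeon does in the next hunk. A driver that wants extra per-page setup could wrap the helpers instead; a hypothetical sketch (the mydrv_* names are made up for illustration):

#include "ttm/ttm_page_alloc.h"

/* Hypothetical wrapper: delegate page allocation to the generic pool,
 * then do any driver-specific per-page setup over ttm->pages[]. */
static int mydrv_ttm_tt_populate(struct ttm_tt *ttm)
{
	int ret;

	ret = ttm_pool_populate(ttm);
	if (ret)
		return ret;

	/* driver-specific per-page setup would go here */
	return 0;
}

static void mydrv_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
	/* driver-specific per-page teardown would go here */
	ttm_pool_unpopulate(ttm);
}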
+2 −0
@@ -581,6 +581,8 @@ struct ttm_tt *radeon_ttm_tt_create(struct ttm_bo_device *bdev,

static struct ttm_bo_driver radeon_bo_driver = {
	.ttm_tt_create = &radeon_ttm_tt_create,
	.ttm_tt_populate = &ttm_pool_populate,
	.ttm_tt_unpopulate = &ttm_pool_unpopulate,
	.invalidate_caches = &radeon_invalidate_caches,
	.init_mem_type = &radeon_init_mem_type,
	.evict_flags = &radeon_evict_flags,
+17 −14
@@ -244,7 +244,7 @@ static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
				unsigned long page,
				pgprot_t prot)
{
	struct page *d = ttm_tt_get_page(ttm, page);
	struct page *d = ttm->pages[page];
	void *dst;

	if (!d)
@@ -281,7 +281,7 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
				unsigned long page,
				pgprot_t prot)
{
	struct page *s = ttm_tt_get_page(ttm, page);
	struct page *s = ttm->pages[page];
	void *src;

	if (!s)
@@ -342,6 +342,12 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
	if (old_iomap == NULL && ttm == NULL)
		goto out2;

	if (ttm->state == tt_unpopulated) {
		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
		if (ret)
			goto out1;
	}

	add = 0;
	dir = 1;

@@ -502,10 +508,16 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
{
	struct ttm_mem_reg *mem = &bo->mem; pgprot_t prot;
	struct ttm_tt *ttm = bo->ttm;
	struct page *d;
	int i;
	int ret;

	BUG_ON(!ttm);

	if (ttm->state == tt_unpopulated) {
		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
		if (ret)
			return ret;
	}

	if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
		/*
		 * We're mapping a single page, and the desired
@@ -513,18 +525,9 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
		 */

		map->bo_kmap_type = ttm_bo_map_kmap;
		map->page = ttm_tt_get_page(ttm, start_page);
		map->page = ttm->pages[start_page];
		map->virtual = kmap(map->page);
	} else {
	    /*
	     * Populate the part we're mapping;
	     */
		for (i = start_page; i < start_page + num_pages; ++i) {
			d = ttm_tt_get_page(ttm, i);
			if (!d)
				return -ENOMEM;
		}

		/*
		 * We need to use vmap to get the desired page protection
		 * or to make the buffer object look contiguous.
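Both hunks in this file follow the same idiom: any CPU-access path that is about to dereference ttm->pages[] first makes sure the object is populated by calling the driver hook. A condensed sketch of that check (the helper name is illustrative, not part of the patch):

/* Illustrative helper, not part of the patch: the check both hunks
 * above open-code before touching ttm->pages[]. */
static int ttm_tt_ensure_populated(struct ttm_tt *ttm)
{
	if (ttm->state == tt_unpopulated)
		return ttm->bdev->driver->ttm_tt_populate(ttm);
	return 0;
}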
+7 −2
@@ -174,18 +174,23 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
		vma->vm_page_prot = (bo->mem.placement & TTM_PL_FLAG_CACHED) ?
		    vm_get_page_prot(vma->vm_flags) :
		    ttm_io_prot(bo->mem.placement, vma->vm_page_prot);

		/* Allocate all page at once, most common usage */
		if (ttm->bdev->driver->ttm_tt_populate(ttm)) {
			retval = VM_FAULT_OOM;
			goto out_io_unlock;
		}
	}

	/*
	 * Speculatively prefault a number of pages. Only error on
	 * first page.
	 */

	for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
		if (bo->mem.bus.is_iomem)
			pfn = ((bo->mem.bus.base + bo->mem.bus.offset) >> PAGE_SHIFT) + page_offset;
		else {
			page = ttm_tt_get_page(ttm, page_offset);
			page = ttm->pages[page_offset];
			if (unlikely(!page && i == 0)) {
				retval = VM_FAULT_OOM;
				goto out_io_unlock;
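In the fault handler the object is populated in full up front, on the theory that most faults end up touching the whole buffer anyway, and the speculative prefault loop then simply indexes ttm->pages[]. A condensed, illustrative version of that flow, with locking, the io-memory branch and the actual VMA insertion omitted:

/* Condensed, illustrative version of the fault path above; locking,
 * the io-memory branch and the VMA insertion are omitted. */
static int sketch_fault_prefault(struct ttm_tt *ttm, unsigned long page_offset)
{
	unsigned long pfn;
	int i;

	/* Allocate all pages at once, the most common usage. */
	if (ttm->bdev->driver->ttm_tt_populate(ttm))
		return VM_FAULT_OOM;

	for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
		struct page *page = ttm->pages[page_offset];

		/* Only error on the first page. */
		if (unlikely(!page && i == 0))
			return VM_FAULT_OOM;

		pfn = page_to_pfn(page);
		/* ... vm_insert_mixed() would map pfn into the VMA here ... */
		page_offset++;
	}
	return 0;
}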
+57 −0
@@ -855,6 +855,63 @@ void ttm_page_alloc_fini(void)
	_manager = NULL;
}

int ttm_pool_populate(struct ttm_tt *ttm)
{
	struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
	unsigned i;
	int ret;

	if (ttm->state != tt_unpopulated)
		return 0;

	for (i = 0; i < ttm->num_pages; ++i) {
		ret = ttm_get_pages(&ttm->pages[i], ttm->page_flags,
				    ttm->caching_state, 1,
				    &ttm->dma_address[i]);
		if (ret != 0) {
			ttm_pool_unpopulate(ttm);
			return -ENOMEM;
		}

		ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
						false, false);
		if (unlikely(ret != 0)) {
			ttm_pool_unpopulate(ttm);
			return -ENOMEM;
		}
	}

	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
		ret = ttm_tt_swapin(ttm);
		if (unlikely(ret != 0)) {
			ttm_pool_unpopulate(ttm);
			return ret;
		}
	}

	ttm->state = tt_unbound;
	return 0;
}
EXPORT_SYMBOL(ttm_pool_populate);

void ttm_pool_unpopulate(struct ttm_tt *ttm)
{
	unsigned i;

	for (i = 0; i < ttm->num_pages; ++i) {
		if (ttm->pages[i]) {
			ttm_mem_global_free_page(ttm->glob->mem_glob,
						 ttm->pages[i]);
			ttm_put_pages(&ttm->pages[i], 1,
				      ttm->page_flags,
				      ttm->caching_state,
				      ttm->dma_address);
		}
	}
	ttm->state = tt_unpopulated;
}
EXPORT_SYMBOL(ttm_pool_unpopulate);
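A successful ttm_pool_populate() leaves the object in tt_unbound (swapping its contents back in if it had been swapped out), while ttm_pool_unpopulate() drops the memory-accounting charge and returns it to tt_unpopulated. A sketch of how the pair is expected to bracket direct CPU access to the backing pages; the unpopulate call site (object teardown) is not part of this excerpt, so the pairing shown is an assumption and the function name is made up:

/* Sketch only (mydrv_touch_first_page is made up): populate before
 * CPU access, unpopulate once the backing pages are no longer needed.
 * In practice unpopulate is expected to happen at ttm_tt teardown
 * rather than immediately after use. */
static int mydrv_touch_first_page(struct ttm_tt *ttm)
{
	void *va;
	int ret;

	ret = ttm->bdev->driver->ttm_tt_populate(ttm);	/* -> tt_unbound */
	if (ret)
		return ret;

	va = kmap(ttm->pages[0]);
	memset(va, 0, PAGE_SIZE);			/* CPU access */
	kunmap(ttm->pages[0]);

	ttm->bdev->driver->ttm_tt_unpopulate(ttm);	/* -> tt_unpopulated */
	return 0;
}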

int ttm_page_alloc_debugfs(struct seq_file *m, void *data)
{
	struct ttm_page_pool *p;