
Commit aa095871 authored by Matthew Auld, committed by Chris Wilson

drm/i915: add support for 64K scratch page



Before we can fully enable 64K pages, we first need to support a 64K
scratch page if we intend to support objects with sizes < 2M, since any
scratch PTE must also point to a 64K region. Without this, our 64K
usage is limited to objects which completely fill the page-table, and
therefore don't need any scratch.
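
To make the arithmetic behind this concrete, here is a small stand-alone sketch (not part of the patch; the GTT_PAGE_SIZE_* constants are simplified stand-ins for the driver's I915_GTT_PAGE_SIZE_* definitions):

/* Stand-alone illustration, not kernel code: why a 4K scratch page
 * cannot back a PTE in 64K mode. */
#include <assert.h>

#define GTT_PAGE_SIZE_4K  (1u << 12)
#define GTT_PAGE_SIZE_64K (1u << 16)
#define GTT_PAGE_SIZE_2M  (1u << 21)

int main(void)
{
	/* A page-table covers 2M with 512 4K entries... */
	assert(GTT_PAGE_SIZE_2M / GTT_PAGE_SIZE_4K == 512);

	/* ...but in 64K mode only every 16th entry is consumed... */
	assert(GTT_PAGE_SIZE_64K / GTT_PAGE_SIZE_4K == 16);

	/* ...and each used entry, scratch included, must reference a
	 * 64K-aligned 64K region, so a plain 4K scratch page is only
	 * sufficient when the object fills the whole page-table. */
	assert(GTT_PAGE_SIZE_4K < GTT_PAGE_SIZE_64K);

	return 0;
}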

v2: add reminder about why 48b PPGTT

Reported-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20171006145041.21673-14-matthew.auld@intel.com
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20171006221833.32439-13-chris@chris-wilson.co.uk
parent 0a03852e
+53 −11
@@ -519,9 +519,47 @@ static void fill_page_dma_32(struct i915_address_space *vm,
 static int
 setup_scratch_page(struct i915_address_space *vm, gfp_t gfp)
 {
-	struct page *page;
+	struct page *page = NULL;
 	dma_addr_t addr;
+	int order;
 
+	/*
+	 * In order to utilize 64K pages for an object with a size < 2M, we will
+	 * need to support a 64K scratch page, given that every 16th entry for a
+	 * page-table operating in 64K mode must point to a properly aligned 64K
+	 * region, including any PTEs which happen to point to scratch.
+	 *
+	 * This is only relevant for the 48b PPGTT where we support
+	 * huge-gtt-pages, see also i915_vma_insert().
+	 *
+	 * TODO: we should really consider write-protecting the scratch-page and
+	 * sharing between ppgtt
+	 */
+	if (i915_vm_is_48bit(vm) &&
+	    HAS_PAGE_SIZES(vm->i915, I915_GTT_PAGE_SIZE_64K)) {
+		order = get_order(I915_GTT_PAGE_SIZE_64K);
+		page = alloc_pages(gfp | __GFP_ZERO, order);
+		if (page) {
+			addr = dma_map_page(vm->dma, page, 0,
+					    I915_GTT_PAGE_SIZE_64K,
+					    PCI_DMA_BIDIRECTIONAL);
+			if (unlikely(dma_mapping_error(vm->dma, addr))) {
+				__free_pages(page, order);
+				page = NULL;
+			}
+
+			if (!IS_ALIGNED(addr, I915_GTT_PAGE_SIZE_64K)) {
+				dma_unmap_page(vm->dma, addr,
+					       I915_GTT_PAGE_SIZE_64K,
+					       PCI_DMA_BIDIRECTIONAL);
+				__free_pages(page, order);
+				page = NULL;
+			}
+		}
+	}
+
+	if (!page) {
+		order = 0;
 		page = alloc_page(gfp | __GFP_ZERO);
 		if (unlikely(!page))
 			return -ENOMEM;
@@ -532,9 +570,12 @@ setup_scratch_page(struct i915_address_space *vm, gfp_t gfp)
 			__free_page(page);
 			return -ENOMEM;
 		}
+	}
 
 	vm->scratch_page.page = page;
 	vm->scratch_page.daddr = addr;
+	vm->scratch_page.order = order;
+
 	return 0;
 }
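
The hunk above follows a try-then-fallback shape: opportunistically allocate and map the 64K page, verify the returned DMA address is 64K-aligned, and otherwise fall back to a single 4K page. A user-space analogy of that shape, using aligned_alloc() as a stand-in for the alloc_pages() + dma_map_page() pair (an illustrative sketch, not the driver code):

/* User-space analogy of setup_scratch_page()'s try-64K-then-fallback
 * pattern. aligned_alloc() always returns aligned memory, so the
 * alignment branch here only mirrors the driver's IS_ALIGNED() check
 * structurally. Not driver code. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define SZ_4K  (1ul << 12)
#define SZ_64K (1ul << 16)

int main(void)
{
	size_t size = SZ_64K;
	void *scratch = aligned_alloc(SZ_64K, SZ_64K); /* try 64K first */

	if (!scratch || ((uintptr_t)scratch & (SZ_64K - 1))) {
		/* Fall back to a single small page, as the driver does. */
		free(scratch);
		size = SZ_4K;
		scratch = aligned_alloc(SZ_4K, SZ_4K);
		if (!scratch)
			return 1; /* -ENOMEM analogue */
	}

	printf("scratch size: %zu\n", size);
	free(scratch);
	return 0;
}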

@@ -542,8 +583,9 @@ static void cleanup_scratch_page(struct i915_address_space *vm)
 {
 	struct i915_page_dma *p = &vm->scratch_page;
 
-	dma_unmap_page(vm->dma, p->daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-	__free_page(p->page);
+	dma_unmap_page(vm->dma, p->daddr, BIT(p->order) << PAGE_SHIFT,
+		       PCI_DMA_BIDIRECTIONAL);
+	__free_pages(p->page, p->order);
 }
 
 static struct i915_page_table *alloc_pt(struct i915_address_space *vm)
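
cleanup_scratch_page() above recovers the mapped size from the new order field as BIT(p->order) << PAGE_SHIFT, i.e. (1 << order) pages. A minimal user-space sketch of that order/size round-trip, assuming 4K pages and using sketch_get_order() as a hypothetical stand-in for the kernel's get_order():

/* Illustrative sketch only: order <-> size round-trip, assuming 4K pages. */
#include <assert.h>
#include <stddef.h>

#define SKETCH_PAGE_SHIFT 12
#define SKETCH_PAGE_SIZE  (1ul << SKETCH_PAGE_SHIFT)

/* Smallest order such that (PAGE_SIZE << order) >= size. */
static int sketch_get_order(size_t size)
{
	int order = 0;

	while ((SKETCH_PAGE_SIZE << order) < size)
		order++;
	return order;
}

int main(void)
{
	int order = sketch_get_order(1ul << 16);	/* 64K scratch */

	assert(order == 4);				/* 16 x 4K pages */

	/* Free side recovers the size, as in cleanup_scratch_page(). */
	assert(((1ul << order) << SKETCH_PAGE_SHIFT) == (1ul << 16));
	return 0;
}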
+1 −0
@@ -215,6 +215,7 @@ struct i915_vma;

 struct i915_page_dma {
 	struct page *page;
+	int order;
 	union {
 		dma_addr_t daddr;