
Commit d7931c18 authored by Daniel Vetter

Merge tag 'gvt-next-2016-11-07' of https://github.com/01org/gvt-linux into drm-intel-next-queued



gvt-next-2016-11-07

- Fix regression from e95433c7
- Some MMIO handler fixes
- Add better handling for guest reset control
- Scratch page table tree for shadow PPGTT

Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
parents bfb81049 3b6411c2
drivers/gpu/drm/i915/gvt/cmd_parser.c (+2 −2)

@@ -1418,8 +1418,8 @@ static int cmd_handler_mi_op_2e(struct parser_exec_state *s)
 static int cmd_handler_mi_op_2f(struct parser_exec_state *s)
 {
 	int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
-	int op_size = ((1 << (cmd_val(s, 0) & GENMASK(20, 19) >> 19)) *
-			sizeof(u32));
+	int op_size = (1 << ((cmd_val(s, 0) & GENMASK(20, 19)) >> 19)) *
+			sizeof(u32);
 	unsigned long gma, gma_high;
 	int ret = 0;
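
Note: the hunk above is a pure operator-precedence fix. In C, `>>` binds tighter than `&`, so the old expression masked cmd_val(s, 0) with GENMASK(20, 19) >> 19 (which is just 0x3) instead of extracting bits 20:19 and shifting them down. A standalone sketch of the difference, with GENMASK(20, 19) expanded by hand since kernel headers are not in play here:

#include <assert.h>
#include <stdint.h>

#define GENMASK_20_19 0x180000u	/* GENMASK(20, 19): bits 19 and 20 set */

int main(void)
{
	uint32_t cmd = 1u << 20;	/* field value 0b10 encoded in bits 20:19 */

	/* Buggy parse: '&' with (GENMASK >> 19) == 0x3 reads bits 1:0. */
	uint32_t buggy = cmd & GENMASK_20_19 >> 19;

	/* Fixed parse: mask bits 20:19 first, then shift the field down. */
	uint32_t fixed = (cmd & GENMASK_20_19) >> 19;

	assert(buggy == 0);	/* wrong: bits 1:0 of this command are clear */
	assert(fixed == 2);	/* right: the encoded field value */
	return 0;
}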

drivers/gpu/drm/i915/gvt/gtt.c (+90 −61)

@@ -138,36 +138,6 @@ int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index,
 	memcpy(&(e)->val64, &v, sizeof(v)); \
 } while (0)
 
-enum {
-	GTT_TYPE_INVALID = -1,
-
-	GTT_TYPE_GGTT_PTE,
-
-	GTT_TYPE_PPGTT_PTE_4K_ENTRY,
-	GTT_TYPE_PPGTT_PTE_2M_ENTRY,
-	GTT_TYPE_PPGTT_PTE_1G_ENTRY,
-
-	GTT_TYPE_PPGTT_PTE_ENTRY,
-
-	GTT_TYPE_PPGTT_PDE_ENTRY,
-	GTT_TYPE_PPGTT_PDP_ENTRY,
-	GTT_TYPE_PPGTT_PML4_ENTRY,
-
-	GTT_TYPE_PPGTT_ROOT_ENTRY,
-
-	GTT_TYPE_PPGTT_ROOT_L3_ENTRY,
-	GTT_TYPE_PPGTT_ROOT_L4_ENTRY,
-
-	GTT_TYPE_PPGTT_ENTRY,
-
-	GTT_TYPE_PPGTT_PTE_PT,
-	GTT_TYPE_PPGTT_PDE_PT,
-	GTT_TYPE_PPGTT_PDP_PT,
-	GTT_TYPE_PPGTT_PML4_PT,
-
-	GTT_TYPE_MAX,
-};
-
 /*
  * Mappings between GTT_TYPE* enumerations.
  * Following information can be found according to the given type:
@@ -842,13 +812,18 @@ static int ppgtt_invalidate_shadow_page_by_shadow_entry(struct intel_vgpu *vgpu,
 {
 	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
 	struct intel_vgpu_ppgtt_spt *s;
+	intel_gvt_gtt_type_t cur_pt_type;
 
 	if (WARN_ON(!gtt_type_is_pt(get_next_pt_type(e->type))))
 		return -EINVAL;
 
-	if (ops->get_pfn(e) == vgpu->gtt.scratch_page_mfn)
-		return 0;
-
+	if (e->type != GTT_TYPE_PPGTT_ROOT_L3_ENTRY
+		&& e->type != GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
+		cur_pt_type = get_next_pt_type(e->type) + 1;
+		if (ops->get_pfn(e) ==
+			vgpu->gtt.scratch_pt[cur_pt_type].page_mfn)
+			return 0;
+	}
 	s = ppgtt_find_shadow_page(vgpu, ops->get_pfn(e));
 	if (!s) {
 		gvt_err("vgpu%d: fail to find shadow page: mfn: 0x%lx\n",
@@ -1015,7 +990,7 @@ static int ppgtt_handle_guest_entry_removal(struct intel_vgpu_guest_page *gpt,
 	if (!ops->test_present(&e))
 		return 0;
 
-	if (ops->get_pfn(&e) == vgpu->gtt.scratch_page_mfn)
+	if (ops->get_pfn(&e) == vgpu->gtt.scratch_pt[sp->type].page_mfn)
 		return 0;
 
 	if (gtt_type_is_pt(get_next_pt_type(we->type))) {
@@ -1030,7 +1005,7 @@ static int ppgtt_handle_guest_entry_removal(struct intel_vgpu_guest_page *gpt,
 		if (ret)
 			goto fail;
 	}
-	ops->set_pfn(&e, vgpu->gtt.scratch_page_mfn);
+	ops->set_pfn(&e, vgpu->gtt.scratch_pt[sp->type].page_mfn);
 	ppgtt_set_shadow_entry(spt, &e, index);
 	return 0;
 fail:
@@ -1921,47 +1896,101 @@ int intel_vgpu_emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
 	return ret;
 }
 
-static int create_scratch_page(struct intel_vgpu *vgpu)
+static int alloc_scratch_pages(struct intel_vgpu *vgpu,
+		intel_gvt_gtt_type_t type)
 {
 	struct intel_vgpu_gtt *gtt = &vgpu->gtt;
-	void *p;
-	void *vaddr;
+	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
+	int page_entry_num = GTT_PAGE_SIZE >>
+				vgpu->gvt->device_info.gtt_entry_size_shift;
+	struct page *scratch_pt;
 	unsigned long mfn;
+	int i;
+	void *p;
+
+	if (WARN_ON(type < GTT_TYPE_PPGTT_PTE_PT || type >= GTT_TYPE_MAX))
+		return -EINVAL;
 
-	gtt->scratch_page = alloc_page(GFP_KERNEL);
-	if (!gtt->scratch_page) {
-		gvt_err("Failed to allocate scratch page.\n");
+	scratch_pt = alloc_page(GFP_KERNEL | GFP_ATOMIC | __GFP_ZERO);
+	if (!scratch_pt) {
+		gvt_err("fail to allocate scratch page\n");
 		return -ENOMEM;
 	}
 
-	/* set to zero */
-	p = kmap_atomic(gtt->scratch_page);
-	memset(p, 0, PAGE_SIZE);
-	kunmap_atomic(p);
+	p = kmap_atomic(scratch_pt);
+	mfn = intel_gvt_hypervisor_virt_to_mfn(p);
+	if (mfn == INTEL_GVT_INVALID_ADDR) {
+		gvt_err("fail to translate vaddr:0x%llx\n", (u64)p);
+		kunmap_atomic(p);
+		__free_page(scratch_pt);
+		return -EFAULT;
+	}
+	gtt->scratch_pt[type].page_mfn = mfn;
+	gtt->scratch_pt[type].page = scratch_pt;
+	gvt_dbg_mm("vgpu%d create scratch_pt: type %d mfn=0x%lx\n",
+			vgpu->id, type, mfn);
+
+	/* Build the tree by full filled the scratch pt with the entries which
+	 * point to the next level scratch pt or scratch page. The
+	 * scratch_pt[type] indicate the scratch pt/scratch page used by the
+	 * 'type' pt.
+	 * e.g. scratch_pt[GTT_TYPE_PPGTT_PDE_PT] is used by
+	 * GTT_TYPE_PPGTT_PDE_PT level pt, that means this scatch_pt it self
+	 * is GTT_TYPE_PPGTT_PTE_PT, and full filled by scratch page mfn.
+	 */
+	if (type > GTT_TYPE_PPGTT_PTE_PT && type < GTT_TYPE_MAX) {
+		struct intel_gvt_gtt_entry se;
 
-	/* translate page to mfn */
-	vaddr = page_address(gtt->scratch_page);
-	mfn = intel_gvt_hypervisor_virt_to_mfn(vaddr);
+		memset(&se, 0, sizeof(struct intel_gvt_gtt_entry));
+		se.type = get_entry_type(type - 1);
+		ops->set_pfn(&se, gtt->scratch_pt[type - 1].page_mfn);
 
-	if (mfn == INTEL_GVT_INVALID_ADDR) {
-		gvt_err("fail to translate vaddr: 0x%p\n", vaddr);
-		__free_page(gtt->scratch_page);
-		gtt->scratch_page = NULL;
-		return -ENXIO;
+		/* The entry parameters like present/writeable/cache type
+		 * set to the same as i915's scratch page tree.
+		 */
+		se.val64 |= _PAGE_PRESENT | _PAGE_RW;
+		if (type == GTT_TYPE_PPGTT_PDE_PT)
+			se.val64 |= PPAT_CACHED_INDEX;
+
+		for (i = 0; i < page_entry_num; i++)
+			ops->set_entry(p, &se, i, false, 0, vgpu);
 	}
 
-	gtt->scratch_page_mfn = mfn;
-	gvt_dbg_core("vgpu%d create scratch page: mfn=0x%lx\n", vgpu->id, mfn);
+	kunmap_atomic(p);
+
 	return 0;
 }
 
-static void release_scratch_page(struct intel_vgpu *vgpu)
+static int release_scratch_page_tree(struct intel_vgpu *vgpu)
 {
-	if (vgpu->gtt.scratch_page != NULL) {
-		__free_page(vgpu->gtt.scratch_page);
-		vgpu->gtt.scratch_page = NULL;
-		vgpu->gtt.scratch_page_mfn = 0;
+	int i;
+
+	for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
+		if (vgpu->gtt.scratch_pt[i].page != NULL) {
+			__free_page(vgpu->gtt.scratch_pt[i].page);
+			vgpu->gtt.scratch_pt[i].page = NULL;
+			vgpu->gtt.scratch_pt[i].page_mfn = 0;
+		}
 	}
+
+	return 0;
+}
+
+static int create_scratch_page_tree(struct intel_vgpu *vgpu)
+{
+	int i, ret;
+
+	for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
+		ret = alloc_scratch_pages(vgpu, i);
+		if (ret)
+			goto err;
+	}
+
+	return 0;
+
+err:
+	release_scratch_page_tree(vgpu);
+	return ret;
 }
 
 /**
@@ -1995,7 +2024,7 @@ int intel_vgpu_init_gtt(struct intel_vgpu *vgpu)
 
 	gtt->ggtt_mm = ggtt_mm;
 
-	return create_scratch_page(vgpu);
+	return create_scratch_page_tree(vgpu);
 }
 
 /**
@@ -2014,7 +2043,7 @@ void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu)
 	struct intel_vgpu_mm *mm;
 
 	ppgtt_free_all_shadow_page(vgpu);
-	release_scratch_page(vgpu);
+	release_scratch_page_tree(vgpu);
 
 	list_for_each_safe(pos, n, &vgpu->gtt.mm_list_head) {
 		mm = container_of(pos, struct intel_vgpu_mm, list);
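
Note: the block comment added in alloc_scratch_pages() is the heart of this change. Rather than one shared scratch page, each page-table level gets its own scratch table whose entries all point at the scratch table one level below, so a guest PPGTT walk through unmapped space terminates harmlessly at any depth. A userspace model of that bottom-up construction, with raw pointers standing in for the kernel's MFN-plus-flags entries (names echo the kernel's, but this is only a sketch):

#include <stdint.h>
#include <stdlib.h>

#define LEVELS 4		/* stand-ins for GTT_TYPE_PPGTT_PTE_PT..PML4_PT */
#define ENTRIES_PER_PT 512	/* 4KiB page / 8-byte entry */

static uint64_t *scratch_pt[LEVELS];

static int create_scratch_page_tree(void)
{
	int lvl, i;

	for (lvl = 0; lvl < LEVELS; lvl++) {
		/* like alloc_page(... | __GFP_ZERO): the PTE level stays
		 * all-zero; upper levels are overwritten just below */
		scratch_pt[lvl] = calloc(ENTRIES_PER_PT, sizeof(uint64_t));
		if (!scratch_pt[lvl])
			return -1;	/* caller unwinds, as release_scratch_page_tree() does */

		/* point every slot at the scratch table one level down */
		if (lvl > 0)
			for (i = 0; i < ENTRIES_PER_PT; i++)
				scratch_pt[lvl][i] =
					(uint64_t)(uintptr_t)scratch_pt[lvl - 1];
	}
	return 0;
}

int main(void)
{
	return create_scratch_page_tree() ? 1 : 0;
}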
drivers/gpu/drm/i915/gvt/gtt.h (+38 −2)

@@ -88,6 +88,36 @@ enum {
 	INTEL_GVT_MM_PPGTT,
 };
 
+typedef enum {
+	GTT_TYPE_INVALID = -1,
+
+	GTT_TYPE_GGTT_PTE,
+
+	GTT_TYPE_PPGTT_PTE_4K_ENTRY,
+	GTT_TYPE_PPGTT_PTE_2M_ENTRY,
+	GTT_TYPE_PPGTT_PTE_1G_ENTRY,
+
+	GTT_TYPE_PPGTT_PTE_ENTRY,
+
+	GTT_TYPE_PPGTT_PDE_ENTRY,
+	GTT_TYPE_PPGTT_PDP_ENTRY,
+	GTT_TYPE_PPGTT_PML4_ENTRY,
+
+	GTT_TYPE_PPGTT_ROOT_ENTRY,
+
+	GTT_TYPE_PPGTT_ROOT_L3_ENTRY,
+	GTT_TYPE_PPGTT_ROOT_L4_ENTRY,
+
+	GTT_TYPE_PPGTT_ENTRY,
+
+	GTT_TYPE_PPGTT_PTE_PT,
+	GTT_TYPE_PPGTT_PDE_PT,
+	GTT_TYPE_PPGTT_PDP_PT,
+	GTT_TYPE_PPGTT_PML4_PT,
+
+	GTT_TYPE_MAX,
+} intel_gvt_gtt_type_t;
+
 struct intel_vgpu_mm {
 	int type;
 	bool initialized;
@@ -151,6 +181,12 @@ extern void intel_vgpu_destroy_mm(struct kref *mm_ref);
 
 struct intel_vgpu_guest_page;
 
+struct intel_vgpu_scratch_pt {
+	struct page *page;
+	unsigned long page_mfn;
+};
+
+
 struct intel_vgpu_gtt {
 	struct intel_vgpu_mm *ggtt_mm;
 	unsigned long active_ppgtt_mm_bitmap;
@@ -160,8 +196,8 @@ struct intel_vgpu_gtt {
 	atomic_t n_write_protected_guest_page;
 	struct list_head oos_page_list_head;
 	struct list_head post_shadow_list_head;
-	struct page *scratch_page;
-	unsigned long scratch_page_mfn;
+	struct intel_vgpu_scratch_pt scratch_pt[GTT_TYPE_MAX];
+
 };
 
 extern int intel_vgpu_init_gtt(struct intel_vgpu *vgpu);
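
Note: two things make the new scratch_pt[GTT_TYPE_MAX] array work. The enum now lives in gtt.h as a named typedef so gtt.c can declare intel_gvt_gtt_type_t variables, and its ordering is load-bearing: alloc_scratch_pages() reaches the next level down via type - 1, and the create/release loops walk GTT_TYPE_PPGTT_PTE_PT through GTT_TYPE_MAX. A tiny check of that ordering invariant (values here start at zero rather than at the header's real offsets; only consecutiveness matters):

#include <assert.h>

/* Only the tail of the real enum, and only its ordering, matters for
 * the scratch-tree code: each *_PT value is the previous one + 1. */
typedef enum {
	GTT_TYPE_PPGTT_PTE_PT,	/* actual kernel values differ; order is the invariant */
	GTT_TYPE_PPGTT_PDE_PT,
	GTT_TYPE_PPGTT_PDP_PT,
	GTT_TYPE_PPGTT_PML4_PT,
	GTT_TYPE_MAX,
} gtt_pt_type_t;

int main(void)
{
	/* alloc_scratch_pages(type) links its entries to scratch_pt[type - 1] */
	assert(GTT_TYPE_PPGTT_PDE_PT - 1 == GTT_TYPE_PPGTT_PTE_PT);
	assert(GTT_TYPE_PPGTT_PML4_PT - 1 == GTT_TYPE_PPGTT_PDP_PT);
	/* and create/release iterate i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX */
	assert(GTT_TYPE_MAX - GTT_TYPE_PPGTT_PTE_PT == 4);
	return 0;
}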
drivers/gpu/drm/i915/gvt/handlers.c (+38 −6)

@@ -1158,7 +1158,10 @@ static int fpga_dbg_mmio_write(struct intel_vgpu *vgpu,
 static int dma_ctrl_write(struct intel_vgpu *vgpu, unsigned int offset,
 		void *p_data, unsigned int bytes)
 {
-	u32 mode = *(u32 *)p_data;
+	u32 mode;
+
+	write_vreg(vgpu, offset, p_data, bytes);
+	mode = vgpu_vreg(vgpu, offset);
 
 	if (GFX_MODE_BIT_SET_IN_MASK(mode, START_DMA)) {
 		WARN_ONCE(1, "VM(%d): iGVT-g doesn't supporte GuC\n",
@@ -1275,19 +1278,20 @@ static int skl_misc_ctl_write(struct intel_vgpu *vgpu, unsigned int offset,
 	switch (offset) {
 	case 0x4ddc:
 		vgpu_vreg(vgpu, offset) = 0x8000003c;
+		/* WaCompressedResourceSamplerPbeMediaNewHashMode:skl */
+		if (IS_SKL_REVID(dev_priv, SKL_REVID_C0, REVID_FOREVER))
+			I915_WRITE(reg, vgpu_vreg(vgpu, offset));
 		break;
 	case 0x42080:
 		vgpu_vreg(vgpu, offset) = 0x8000;
+		/* WaCompressedResourceDisplayNewHashMode:skl */
+		if (IS_SKL_REVID(dev_priv, SKL_REVID_E0, REVID_FOREVER))
+			I915_WRITE(reg, vgpu_vreg(vgpu, offset));
 		break;
 	default:
 		return -EINVAL;
 	}
 
-	/**
-	 * TODO: need detect stepping info after gvt contain such information
-	 * 0x4ddc enabled after C0, 0x42080 enabled after E0.
-	 */
-	I915_WRITE(reg, vgpu_vreg(vgpu, offset));
 	return 0;
 }
 
@@ -1367,6 +1371,8 @@ static int gvt_reg_tlb_control_handler(struct intel_vgpu *vgpu,
 	int rc = 0;
 	unsigned int id = 0;
 
+	write_vreg(vgpu, offset, p_data, bytes);
+
 	switch (offset) {
 	case 0x4260:
 		id = RCS;
@@ -1392,6 +1398,23 @@ static int gvt_reg_tlb_control_handler(struct intel_vgpu *vgpu,
 	return rc;
 }
 
+static int ring_reset_ctl_write(struct intel_vgpu *vgpu,
+	unsigned int offset, void *p_data, unsigned int bytes)
+{
+	u32 data;
+
+	write_vreg(vgpu, offset, p_data, bytes);
+	data = vgpu_vreg(vgpu, offset);
+
+	if (data & _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET))
+		data |= RESET_CTL_READY_TO_RESET;
+	else if (data & _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET))
+		data &= ~RESET_CTL_READY_TO_RESET;
+
+	vgpu_vreg(vgpu, offset) = data;
+	return 0;
+}
+
 #define MMIO_F(reg, s, f, am, rm, d, r, w) do { \
 	ret = new_mmio_info(gvt, INTEL_GVT_MMIO_OFFSET(reg), \
 		f, s, am, rm, d, r, w); \
@@ -2298,6 +2321,15 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
 
 	MMIO_RING_D(RING_ACTHD_UDW, D_BDW_PLUS);
 
+#define RING_REG(base) (base + 0xd0)
+	MMIO_RING_F(RING_REG, 4, F_RO, 0,
+		~_MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET), D_BDW_PLUS, NULL,
+		ring_reset_ctl_write);
+	MMIO_F(RING_REG(GEN8_BSD2_RING_BASE), 4, F_RO, 0,
+		~_MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET), D_BDW_PLUS, NULL,
+		ring_reset_ctl_write);
+#undef RING_REG
+
#define RING_REG(base) (base + 0x230)
 	MMIO_RING_DFH(RING_REG, D_BDW_PLUS, 0, NULL, elsp_mmio_write);
 	MMIO_DH(RING_REG(GEN8_BSD2_RING_BASE), D_BDW_PLUS, NULL, elsp_mmio_write);
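
Note: RESET_CTL (ring base + 0xd0) is a masked register: bits 31:16 of a written value act as per-bit write enables for bits 15:0, which is what i915's _MASKED_BIT_ENABLE()/_MASKED_BIT_DISABLE() encode, and why the registration above can expose only the request bit by using ~_MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET) as the read-only mask. A minimal sketch of the write convention itself (macros expanded by hand; this models generic masked-register semantics, not the handler's exact branches):

#include <assert.h>
#include <stdint.h>

/* i915-style masked write: bit n of the register changes only if
 * bit (n + 16) is set in the value written. */
#define MASKED_BIT_ENABLE(b)  (((uint32_t)(b) << 16) | (uint32_t)(b))
#define MASKED_BIT_DISABLE(b) ((uint32_t)(b) << 16)

#define RESET_CTL_REQUEST_RESET (1u << 0)

static uint32_t masked_write(uint32_t cur, uint32_t wval)
{
	uint32_t mask = wval >> 16;

	/* only bits selected by the upper half change */
	return (cur & ~mask) | (wval & mask);
}

int main(void)
{
	uint32_t reg = 0;

	reg = masked_write(reg, MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET));
	assert(reg & RESET_CTL_REQUEST_RESET);

	/* a write without the mask bit set leaves that bit untouched */
	reg = masked_write(reg, RESET_CTL_REQUEST_RESET);
	assert(reg & RESET_CTL_REQUEST_RESET);

	reg = masked_write(reg, MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));
	assert(!(reg & RESET_CTL_REQUEST_RESET));
	return 0;
}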
drivers/gpu/drm/i915/gvt/render.c (+2 −0)

@@ -152,6 +152,8 @@ static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
 
 	if (wait_for_atomic((I915_READ_FW(reg) == 0), 50))
 		gvt_err("timeout in invalidate ring (%d) tlb\n", ring_id);
+	else
+		vgpu_vreg(vgpu, regs[ring_id]) = 0;
 
 	intel_uncore_forcewake_put(dev_priv, fw);
 
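Note: these two lines complete a handshake. The guest writes its virtual TLB-invalidate register and then polls it for zero while the host forwards the request to real hardware; before this fix the vreg was never cleared, so the guest could poll forever even after the invalidate finished. A toy model of the fixed flow (names are ours; hw_done stands in for I915_READ_FW(reg) == 0 within the 50 µs timeout):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Clears the guest-visible register copy only when hardware reported
 * completion, mirroring the new else-branch in handle_tlb_pending_event(). */
static void handle_tlb_pending_event(uint32_t *vreg, bool hw_done)
{
	if (!hw_done)
		return;		/* timeout path: log an error, leave vreg set */
	*vreg = 0;		/* success: let the guest's poll terminate */
}

int main(void)
{
	uint32_t vreg = 1;	/* guest requested a ring TLB invalidate */

	handle_tlb_pending_event(&vreg, true);
	assert(vreg == 0);	/* guest now observes completion */
	return 0;
}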