Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 55dccd62 authored by Jordan Crouse's avatar Jordan Crouse
Browse files

msm: kgsl: Fixup kernel API changes



Life goes on, and so does the kernel. Update some API changes since
msm-4.19 including helper functions for vmfault functions, renaming
a memory statistic, updating cmd-db and removing __mutex_owner which
is no longer available.

Change-Id: Ic0dedbad0b24042f3683a4be99888a0aa86baf78
Signed-off-by: default avatarJordan Crouse <jcrouse@codeaurora.org>
parent 07ba0273
Loading
Loading
Loading
Loading
+2 −2
Original line number Diff line number Diff line
@@ -2858,8 +2858,8 @@ int adreno_dispatcher_idle(struct adreno_device *adreno_dev)
	 * Ensure that this function is not called when dispatcher
	 * mutex is held and device is started
	 */
	if (mutex_is_locked(&dispatcher->mutex) &&
		(__mutex_owner(&dispatcher->mutex) == current))

	if (WARN_ON(mutex_is_locked(&dispatcher->mutex)))
		return -EDEADLK;

	adreno_get_gpu_halt(adreno_dev);
+3 −3
Original line number Diff line number Diff line
@@ -4346,7 +4346,7 @@ static vm_fault_t
kgsl_gpumem_vm_fault(struct vm_fault *vmf)
{
	struct kgsl_mem_entry *entry = vmf->vma->vm_private_data;
	int ret;
	vm_fault_t ret;

	if (!entry)
		return VM_FAULT_SIGBUS;
@@ -4354,7 +4354,7 @@ kgsl_gpumem_vm_fault(struct vm_fault *vmf)
		return VM_FAULT_SIGBUS;

	ret = entry->memdesc.ops->vmfault(&entry->memdesc, vmf->vma, vmf);
	if ((ret == 0) || (ret == VM_FAULT_NOPAGE))
	if (!ret || ret == VM_FAULT_NOPAGE)
		entry->priv->gpumem_mapped += PAGE_SIZE;

	return ret;
@@ -4776,7 +4776,7 @@ static int _register_device(struct kgsl_device *device)
	}

	device->dev->dma_mask = &dma_mask;
	arch_setup_dma_ops(device->dev, 0, 0, NULL, false);
	set_dma_ops(device->dev, NULL);

	dev_set_drvdata(&device->pdev->dev, device);
	return 0;
+2 −2
Original line number Diff line number Diff line
@@ -163,8 +163,8 @@ struct kgsl_memdesc;

struct kgsl_memdesc_ops {
	unsigned int vmflags;
	int (*vmfault)(struct kgsl_memdesc *memdesc, struct vm_area_struct *vma,
		       struct vm_fault *vmf);
	vm_fault_t (*vmfault)(struct kgsl_memdesc *memdesc,
		struct vm_area_struct *vma, struct vm_fault *vmf);
	void (*free)(struct kgsl_memdesc *memdesc);
	int (*map_kernel)(struct kgsl_memdesc *memdesc);
	void (*unmap_kernel)(struct kgsl_memdesc *memdesc);
+3 −16
Original line number Diff line number Diff line
@@ -559,7 +559,7 @@ static int gmu_dcvs_set(struct kgsl_device *device,

struct rpmh_arc_vals {
	unsigned int num;
	uint16_t val[MAX_GX_LEVELS];
	const u16 *val;
};

static const char gfx_res_id[] = "gfx.lvl";
@@ -584,22 +584,9 @@ enum rpmh_vote_type {
static int rpmh_arc_cmds(struct gmu_device *gmu,
		struct rpmh_arc_vals *arc, const char *res_id)
{
	unsigned int len;
	size_t len = 0;

	memset(arc, 0, sizeof(*arc));

	len = cmd_db_read_aux_data_len(res_id);
	if (len == 0)
		return -EINVAL;

	if (len > (MAX_GX_LEVELS << 1)) {
		dev_err(&gmu->pdev->dev,
			"gfx cmddb size %d larger than alloc buf %d of %s\n",
			len, (MAX_GX_LEVELS << 1), res_id);
		return -EINVAL;
	}

	cmd_db_read_aux_data(res_id, (uint8_t *)arc->val, len);
	arc->val = cmd_db_read_aux_data(res_id, &len);

	/*
	 * cmd_db_read_aux_data() gives us a zero-padded table of
+2 −10
Original line number Diff line number Diff line
@@ -100,7 +100,7 @@ _kgsl_pool_add_page(struct kgsl_page_pool *pool, struct page *p)
	list_add_tail(&p->lru, &pool->page_list);
	pool->page_count++;
	spin_unlock(&pool->list_lock);
	mod_node_page_state(page_pgdat(p), NR_INDIRECTLY_RECLAIMABLE_BYTES,
	mod_node_page_state(page_pgdat(p),  NR_KERNEL_MISC_RECLAIMABLE,
				(PAGE_SIZE << pool->pool_order));
}

@@ -117,7 +117,7 @@ _kgsl_pool_get_page(struct kgsl_page_pool *pool)
		list_del(&p->lru);
	}
	spin_unlock(&pool->list_lock);
	mod_node_page_state(page_pgdat(p), NR_INDIRECTLY_RECLAIMABLE_BYTES,
	mod_node_page_state(page_pgdat(p), NR_KERNEL_MISC_RECLAIMABLE,
				-(PAGE_SIZE << pool->pool_order));
	return p;
}
@@ -256,20 +256,12 @@ void kgsl_pool_free_pages(struct page **pages, unsigned int pcount)
	if (!pages)
		return;

	if (WARN(!kern_addr_valid((unsigned long)pages),
		"Address of pages=%pK is not valid\n", pages))
		return;

	for (i = 0; i < pcount;) {
		/*
		 * Free each page or compound page group individually.
		 */
		struct page *p = pages[i];

		if (WARN(!kern_addr_valid((unsigned long)p),
			"Address of page=%pK is not valid\n", p))
			return;

		i += 1 << compound_order(p);
		kgsl_pool_free_page(p);
	}
Loading