Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 0ccebd3b authored by qctecmdr, committed by Gerrit - the friendly Code Review server
Browse files

Merge "msm: kgsl: Fix kvcalloc() calls"

parents 917e0c52 c5f26809
Loading
Loading
Loading
Loading
+2 −2
Original line number Diff line number Diff line
@@ -1200,7 +1200,7 @@ int a6xx_hwsched_submit_cmdobj(struct adreno_device *adreno_dev,
	if (WARN_ON(cmd_sizebytes > HFI_MAX_MSG_SIZE))
		return -EMSGSIZE;

	cmd = kvmalloc(cmd_sizebytes, GFP_KERNEL);
	cmd = kmalloc(cmd_sizebytes, GFP_KERNEL);
	if (cmd == NULL)
		return -ENOMEM;

@@ -1264,7 +1264,7 @@ int a6xx_hwsched_submit_cmdobj(struct adreno_device *adreno_dev,
	adreno_profile_submit_time(&time);

free:
	kvfree(cmd);
	kfree(cmd);

	return ret;
}
+2 −1
Original line number Diff line number Diff line
@@ -2346,7 +2346,8 @@ static int memdesc_sg_virt(struct kgsl_memdesc *memdesc, struct file *vmfile)
	if (sglen == 0 || sglen >= LONG_MAX)
		return -EINVAL;

	pages = kvcalloc(sglen, sizeof(*pages), GFP_KERNEL);
	pages = kvcalloc(sglen, sizeof(*pages),
		GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
	if (pages == NULL)
		return -ENOMEM;

+2 −0
Original line number Diff line number Diff line
@@ -176,6 +176,8 @@ struct kgsl_memdesc_ops {
#define KGSL_MEMDESC_UCODE BIT(7)
/* For global buffers, randomly assign an address from the region */
#define KGSL_MEMDESC_RANDOM BIT(8)
/* Allocate memory from the system instead of the pools */
#define KGSL_MEMDESC_SYSMEM BIT(9)

/**
 * struct kgsl_memdesc - GPU memory object descriptor
+11 −1
Original line number Diff line number Diff line
@@ -96,6 +96,15 @@ _kgsl_pool_add_page(struct kgsl_page_pool *pool, struct page *p)
	if (!p)
		return;

	/*
	 * Sanity check to make sure we don't re-pool a page that
	 * somebody else has a reference to.
	 */
	if (WARN_ON(unlikely(page_count(p) > 1))) {
		__free_pages(p, pool->pool_order);
		return;
	}

	spin_lock(&pool->list_lock);
	list_add_tail(&p->lru, &pool->page_list);
	pool->page_count++;
@@ -433,7 +442,8 @@ int kgsl_pool_alloc_pages(u64 size, struct page ***pages, struct device *dev)
{
	int count = 0;
	int npages = size >> PAGE_SHIFT;
	struct page **local = kvcalloc(npages, sizeof(*local), GFP_KERNEL);
	struct page **local = kvcalloc(npages, sizeof(*local),
		GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
	u32 page_size, align;
	u64 len = size;

+118 −6
Original line number Diff line number Diff line
@@ -862,6 +862,38 @@ static void kgsl_contiguous_free(struct kgsl_memdesc *memdesc)
}

#if IS_ENABLED(CONFIG_QCOM_SECURE_BUFFER)
/*
 * kgsl_free_secure_system_pages - Free a secure buffer backed by system pages
 * @memdesc: Memory descriptor whose backing pages came straight from the
 *           system allocator (not the kgsl page pools)
 *
 * The buffer must first be moved back out of the secure domain via
 * unlock_sgt(); if that fails the pages are intentionally leaked, because
 * reusing memory still owned by the secure side would be unsafe.
 */
static void kgsl_free_secure_system_pages(struct kgsl_memdesc *memdesc)
{
	int i;
	struct scatterlist *sg;
	/* Return the buffer to the non-secure domain before touching it */
	int ret = unlock_sgt(memdesc->sgt);

	if (ret) {
		/*
		 * Unlock of the secure buffer failed. This buffer will
		 * be stuck in secure side forever and is unrecoverable.
		 * Give up on the buffer and don't return it to the
		 * pool.
		 */
		pr_err("kgsl: secure buf unlock failed: gpuaddr: %llx size: %llx ret: %d\n",
			memdesc->gpuaddr, memdesc->size, ret);
		return;
	}

	atomic_long_sub(memdesc->size, &kgsl_driver.stats.secure);

	/*
	 * Each sg entry maps a single order-0 page
	 * (get_order(PAGE_SIZE) == 0), matching how they were allocated.
	 */
	for_each_sg(memdesc->sgt->sgl, sg, memdesc->sgt->nents, i) {
		struct page *page = sg_page(sg);

		__free_pages(page, get_order(PAGE_SIZE));
	}

	sg_free_table(memdesc->sgt);
	kfree(memdesc->sgt);

	memdesc->sgt = NULL;
}

static void kgsl_free_secure_pool_pages(struct kgsl_memdesc *memdesc)
{
	int ret = unlock_sgt(memdesc->sgt);
@@ -904,6 +936,23 @@ static void kgsl_free_pool_pages(struct kgsl_memdesc *memdesc)
	memdesc->pages = NULL;
}

/*
 * kgsl_free_system_pages - Release non-secure pages allocated straight from
 * the system (not from the kgsl pools)
 * @memdesc: Memory descriptor owning the pages
 */
static void kgsl_free_system_pages(struct kgsl_memdesc *memdesc)
{
	struct page **cursor = memdesc->pages;
	int remaining = memdesc->page_count;

	/* Tear down any kernel mapping before the backing pages go away */
	kgsl_paged_unmap_kernel(memdesc);
	WARN_ON(memdesc->hostptr);

	atomic_long_sub(memdesc->size, &kgsl_driver.stats.page_alloc);

	/* Pages were allocated one order-0 page at a time; free them the
	 * same way, in order.
	 */
	while (remaining-- > 0)
		__free_pages(*cursor++, get_order(PAGE_SIZE));

	memdesc->page_count = 0;
	kvfree(memdesc->pages);
	memdesc->pages = NULL;
}

static struct kgsl_memdesc_ops kgsl_contiguous_ops = {
	.free = kgsl_contiguous_free,
	.vmflags = VM_DONTDUMP | VM_PFNMAP | VM_DONTEXPAND | VM_DONTCOPY,
@@ -911,6 +960,11 @@ static struct kgsl_memdesc_ops kgsl_contiguous_ops = {
};

#if IS_ENABLED(CONFIG_QCOM_SECURE_BUFFER)
/* Ops for secure buffers backed by system pages (KGSL_MEMDESC_SYSMEM) */
static struct kgsl_memdesc_ops kgsl_secure_system_ops = {
	.free = kgsl_free_secure_system_pages,
	/* FIXME: Make sure vmflags / vmfault does the right thing here */
};

static struct kgsl_memdesc_ops kgsl_secure_pool_ops = {
	.free = kgsl_free_secure_pool_pages,
	/* FIXME: Make sure vmflags / vmfault does the right thing here */
@@ -925,6 +979,49 @@ static struct kgsl_memdesc_ops kgsl_pool_ops = {
	.unmap_kernel = kgsl_paged_unmap_kernel,
};

/* Ops for non-secure buffers backed by system pages (KGSL_MEMDESC_SYSMEM) */
static struct kgsl_memdesc_ops kgsl_system_ops = {
	.free = kgsl_free_system_pages,
	.vmflags = VM_DONTDUMP | VM_DONTEXPAND | VM_DONTCOPY | VM_MIXEDMAP,
	.vmfault = kgsl_paged_vmfault,
	.map_kernel = kgsl_paged_map_kernel,
	.unmap_kernel = kgsl_paged_unmap_kernel,
};

/*
 * kgsl_system_alloc_pages - Allocate order-0 pages directly from the system
 * @size: Total allocation size in bytes (assumed page-aligned by the caller)
 * @pages: Out parameter; on success set to a kvcalloc'd array of page
 *         pointers owned by the caller (release with kvfree)
 * @dev: Device used to flush the new pages out of the CPU cache
 *
 * Return: Number of pages allocated on success, or -ENOMEM on failure.
 * On failure nothing is left allocated and *pages is untouched.
 */
static int kgsl_system_alloc_pages(u64 size, struct page ***pages,
		struct device *dev)
{
	struct scatterlist sg;
	struct page **local;
	int i, npages = size >> PAGE_SHIFT;

	/*
	 * Size each slot by the element type (*local == struct page *),
	 * not by the out parameter (*pages == struct page **). Both are
	 * pointers so the old form happened to allocate the right amount,
	 * but sizeof(*local) is the correct idiom and matches the other
	 * kvcalloc() calls in this driver.
	 */
	local = kvcalloc(npages, sizeof(*local), GFP_KERNEL | __GFP_NORETRY);
	if (!local)
		return -ENOMEM;

	for (i = 0; i < npages; i++) {
		/* Don't retry hard: the caller treats -ENOMEM as fatal */
		gfp_t gfp = __GFP_ZERO | __GFP_HIGHMEM |
			GFP_KERNEL | __GFP_NORETRY;

		local[i] = alloc_pages(gfp, get_order(PAGE_SIZE));
		if (!local[i]) {
			/* Unwind every page allocated so far */
			for (i = i - 1; i >= 0; i--)
				__free_pages(local[i], get_order(PAGE_SIZE));
			kvfree(local);
			return -ENOMEM;
		}

		/* Make sure the cache is clean */
		sg_init_table(&sg, 1);
		sg_set_page(&sg, local[i], PAGE_SIZE, 0);
		sg_dma_address(&sg) = page_to_phys(local[i]);

		dma_sync_sg_for_device(dev, &sg, 1, DMA_BIDIRECTIONAL);
	}

	*pages = local;
	return npages;
}

#if IS_ENABLED(CONFIG_QCOM_SECURE_BUFFER)
static int kgsl_alloc_secure_pages(struct kgsl_device *device,
		struct kgsl_memdesc *memdesc, u64 size, u64 flags, u32 priv)
@@ -942,8 +1039,13 @@ static int kgsl_alloc_secure_pages(struct kgsl_device *device,
	kgsl_memdesc_init(device, memdesc, flags);
	memdesc->priv |= priv;

	if (priv & KGSL_MEMDESC_SYSMEM) {
		memdesc->ops = &kgsl_secure_system_ops;
		count = kgsl_system_alloc_pages(size, &pages, device->dev);
	} else {
		memdesc->ops = &kgsl_secure_pool_ops;
		count = kgsl_pool_alloc_pages(size, &pages, device->dev);
	}

	if (count < 0)
		return count;
@@ -997,8 +1099,13 @@ static int kgsl_alloc_pages(struct kgsl_device *device,
	kgsl_memdesc_init(device, memdesc, flags);
	memdesc->priv |= priv;

	if (priv & KGSL_MEMDESC_SYSMEM) {
		memdesc->ops = &kgsl_system_ops;
		count = kgsl_system_alloc_pages(size, &pages, device->dev);
	} else {
		memdesc->ops = &kgsl_pool_ops;
		count = kgsl_pool_alloc_pages(size, &pages, device->dev);
	}

	if (count < 0)
		return count;
@@ -1158,7 +1265,8 @@ kgsl_allocate_secure_global(struct kgsl_device *device,
	if (!md)
		return ERR_PTR(-ENOMEM);

	priv |= KGSL_MEMDESC_GLOBAL;
	/* Make sure that we get global memory from system memory */
	priv |= KGSL_MEMDESC_GLOBAL | KGSL_MEMDESC_SYSMEM;

	ret = kgsl_allocate_secure(device, &md->memdesc, size, flags, priv);
	if (ret) {
@@ -1197,7 +1305,11 @@ struct kgsl_memdesc *kgsl_allocate_global(struct kgsl_device *device,
	if (!md)
		return ERR_PTR(-ENOMEM);

	priv |= KGSL_MEMDESC_GLOBAL;
	/*
	 * Make sure that we get global memory from system memory to keep from
	 * taking up pool memory for the life of the driver
	 */
	priv |= KGSL_MEMDESC_GLOBAL | KGSL_MEMDESC_SYSMEM;

	ret = kgsl_allocate_kernel(device, &md->memdesc, size, flags, priv);
	if (ret) {