Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit bae62cc7 authored by Jordan Crouse
Browse files

msm: kgsl: Replace kgsl_malloc and kgsl_free with generic functions



kvcalloc and kvfree exist for the same purpose as kgsl_malloc and
kgsl_free.

Change-Id: Ic0dedbad180ebb3a8ffd1c3d1fd44e5e7811cf16
Signed-off-by: Jordan Crouse <jcrouse@codeaurora.org>
parent 03e92d7e
Loading
Loading
Loading
Loading
+3 −3
Original line number Diff line number Diff line
@@ -2344,11 +2344,11 @@ static int memdesc_sg_virt(struct kgsl_memdesc *memdesc, struct file *vmfile)
	if (sglen == 0 || sglen >= LONG_MAX)
		return -EINVAL;

	pages = kgsl_malloc(sglen * sizeof(struct page *));
	pages = kvcalloc(sglen, sizeof(*pages), GFP_KERNEL);
	if (pages == NULL)
		return -ENOMEM;

	memdesc->sgt = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	memdesc->sgt = kmalloc(sizeof(*memdesc->sgt), GFP_KERNEL);
	if (memdesc->sgt == NULL) {
		ret = -ENOMEM;
		goto out;
@@ -2385,7 +2385,7 @@ static int memdesc_sg_virt(struct kgsl_memdesc *memdesc, struct file *vmfile)
		kfree(memdesc->sgt);
		memdesc->sgt = NULL;
	}
	kgsl_free(pages);
	kvfree(pages);
	return ret;
}

+0 −29
Original line number Diff line number Diff line
@@ -568,35 +568,6 @@ static inline bool kgsl_addr_range_overlap(uint64_t gpuaddr1,
		(gpuaddr1 >= (gpuaddr2 + size2)));
}

/**
 * kgsl_malloc() - Allocate zeroed memory using either kzalloc or vzalloc
 * @size: Size of the desired allocation
 *
 * Allocate a block of memory for the driver - if it is small try to allocate
 * it from kmalloc (fast!) otherwise we need to go with vmalloc (safe!).
 *
 * Both paths return zero-initialized memory. The original used plain
 * vmalloc() for the large case, so allocations above PAGE_SIZE were
 * uninitialized while smaller ones were zeroed - an inconsistency callers
 * could silently depend on. vzalloc() makes the contract uniform
 * (matching the semantics of the generic kvzalloc()).
 *
 * Return: pointer to the allocation, or NULL on failure. Free with
 * kgsl_free().
 */
static inline void *kgsl_malloc(size_t size)
{
	if (size <= PAGE_SIZE)
		return kzalloc(size, GFP_KERNEL);

	/* vzalloc, not vmalloc: keep zeroing consistent with the kzalloc path */
	return vzalloc(size);
}

/**
 * kgsl_free() - Free memory allocated by kgsl_malloc()
 * @ptr: Pointer to the memory to free (may be NULL; NULL is a no-op)
 *
 * Dispatches to vfree() or kfree() depending on whether the pointer lies
 * in the vmalloc address range, so it works for either allocation path
 * taken by kgsl_malloc().
 */
static inline void kgsl_free(void *ptr)
{
	/* kfree(NULL) would also be a no-op; bail out early for clarity */
	if (ptr == NULL)
		return;

	if (is_vmalloc_addr(ptr))
		vfree(ptr);
	else
		kfree(ptr);
}

static inline int kgsl_copy_from_user(void *dest, void __user *src,
		unsigned int ksize, unsigned int usize)
{
+7 −7
Original line number Diff line number Diff line
@@ -887,7 +887,8 @@ kgsl_sharedmem_page_alloc_user(struct kgsl_memdesc *memdesc,
	 * routine by finding the faulted page in constant time.
	 */

	memdesc->pages = kgsl_malloc(len_alloc * sizeof(struct page *));
	memdesc->pages = kvcalloc(len_alloc, sizeof(*memdesc->pages),
		GFP_KERNEL);
	memdesc->page_count = 0;
	memdesc->size = 0;

@@ -956,7 +957,7 @@ kgsl_sharedmem_page_alloc_user(struct kgsl_memdesc *memdesc,
			memdesc->sgt = NULL;

			if (ret == -EADDRNOTAVAIL) {
				kgsl_free(memdesc->pages);
				kvfree(memdesc->pages);
				memset(memdesc, 0, sizeof(*memdesc));
				return ret;
			}
@@ -974,7 +975,7 @@ kgsl_sharedmem_page_alloc_user(struct kgsl_memdesc *memdesc,
		 * We don't need the array for secure buffers because they are
		 * not mapped to CPU
		 */
		kgsl_free(memdesc->pages);
		kvfree(memdesc->pages);
		memdesc->pages = NULL;
		memdesc->page_count = 0;

@@ -996,7 +997,7 @@ kgsl_sharedmem_page_alloc_user(struct kgsl_memdesc *memdesc,
			}
		}

		kgsl_free(memdesc->pages);
		kvfree(memdesc->pages);
		memset(memdesc, 0, sizeof(*memdesc));
	}

@@ -1016,11 +1017,10 @@ void kgsl_sharedmem_free(struct kgsl_memdesc *memdesc)

	if (memdesc->sgt) {
		sg_free_table(memdesc->sgt);
		kfree(memdesc->sgt);
		kvfree(memdesc->sgt);
	}

	if (memdesc->pages)
		kgsl_free(memdesc->pages);
	kvfree(memdesc->pages);
}
EXPORT_SYMBOL(kgsl_sharedmem_free);