
Commit 4c35630c authored by Timur Tabi, committed by Kumar Gala

[POWERPC] Change rheap functions to use ulongs instead of pointers



The rheap allocation functions return a pointer, but the actual value is based
on how the heap was initialized, and so it can be anything, e.g. an offset
into a buffer.  A ulong is a better representation of the value returned by
the allocation functions.
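
As an illustration, a minimal sketch of that usage (the region size, the
variable names, and the owner string are hypothetical; the signatures are
the ones introduced by this patch):

	/* Assume ram_info was set up earlier with rh_init(). */
	rh_attach_region(&ram_info, 0, 16384);	/* manage offsets 0..16383 */

	/* The value handed back is just an offset into the region,
	 * not a kernel pointer. */
	unsigned long offset = rh_alloc(&ram_info, 512, "example");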

This patch changes all of the relevant rheap functions to use unsigned long
integers instead of pointers.  In case of an error, the value returned is
a negative error code that has been cast to an unsigned long.  The caller can
use the IS_ERR_VALUE() macro to check for this.
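
The caller-side check changes accordingly; a sketch using qe_muram_alloc(),
one of the rheap-backed allocators updated below:

	/* Before: a u32 offset, checked with a pointer-style error macro. */
	u32 offset = qe_muram_alloc(size, align);
	if (IS_MURAM_ERR(offset))
		return -ENOMEM;

	/* After: an unsigned long offset; an error is a negative error
	 * code cast to unsigned long. */
	unsigned long offset = qe_muram_alloc(size, align);
	if (IS_ERR_VALUE(offset))
		return -ENOMEM;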

All code that calls the rheap functions is updated accordingly.  The macros
IS_MURAM_ERR() and IS_DPERR() have been deleted in favor of IS_ERR_VALUE().

Error checking has also been added to rh_attach_region().

Signed-off-by: Timur Tabi <timur@freescale.com>
Signed-off-by: Kumar Gala <galak@kernel.crashing.org>
parent 742226c5
+67 −50
@@ -133,7 +133,7 @@ static rh_block_t *get_slot(rh_info_t * info)
	info->empty_slots--;

	/* Initialize */
-	blk->start = NULL;
+	blk->start = 0;
	blk->size = 0;
	blk->owner = NULL;

@@ -158,7 +158,7 @@ static void attach_free_block(rh_info_t * info, rh_block_t * blkn)

	/* We assume that they are aligned properly */
	size = blkn->size;
-	s = (unsigned long)blkn->start;
+	s = blkn->start;
	e = s + size;

	/* Find the blocks immediately before and after the given one
@@ -170,7 +170,7 @@ static void attach_free_block(rh_info_t * info, rh_block_t * blkn)
	list_for_each(l, &info->free_list) {
		blk = list_entry(l, rh_block_t, list);

-		bs = (unsigned long)blk->start;
+		bs = blk->start;
		be = bs + blk->size;

		if (next == NULL && s >= bs)
@@ -188,10 +188,10 @@ static void attach_free_block(rh_info_t * info, rh_block_t * blkn)
	}

	/* Now check if they are really adjacent */
-	if (before != NULL && s != (unsigned long)before->start + before->size)
+	if (before && s != (before->start + before->size))
		before = NULL;

-	if (after != NULL && e != (unsigned long)after->start)
+	if (after && e != after->start)
		after = NULL;

	/* No coalescing; list insert and return */
@@ -216,7 +216,7 @@ static void attach_free_block(rh_info_t * info, rh_block_t * blkn)

	/* Grow the after block backwards */
	if (before == NULL && after != NULL) {
-		after->start = (int8_t *)after->start - size;
+		after->start -= size;
		after->size += size;
		return;
	}
@@ -321,14 +321,14 @@ void rh_init(rh_info_t * info, unsigned int alignment, int max_blocks,
}

/* Attach a free memory region, coalesces regions if adjuscent */
-int rh_attach_region(rh_info_t * info, void *start, int size)
+int rh_attach_region(rh_info_t * info, unsigned long start, int size)
{
	rh_block_t *blk;
	unsigned long s, e, m;
	int r;

	/* The region must be aligned */
-	s = (unsigned long)start;
+	s = start;
	e = s + size;
	m = info->alignment - 1;

@@ -338,9 +338,12 @@ int rh_attach_region(rh_info_t * info, void *start, int size)
	/* Round end down */
	e = e & ~m;

+	if (IS_ERR_VALUE(e) || (e < s))
+		return -ERANGE;
+
	/* Take final values */
-	start = (void *)s;
-	size = (int)(e - s);
+	start = s;
+	size = e - s;

	/* Grow the blocks, if needed */
	r = assure_empty(info, 1);
@@ -358,7 +361,7 @@ int rh_attach_region(rh_info_t * info, void *start, int size)
}

/* Detatch given address range, splits free block if needed. */
-void *rh_detach_region(rh_info_t * info, void *start, int size)
+unsigned long rh_detach_region(rh_info_t * info, unsigned long start, int size)
{
	struct list_head *l;
	rh_block_t *blk, *newblk;
@@ -366,10 +369,10 @@ void *rh_detach_region(rh_info_t * info, void *start, int size)

	/* Validate size */
	if (size <= 0)
-		return ERR_PTR(-EINVAL);
+		return (unsigned long) -EINVAL;

	/* The region must be aligned */
-	s = (unsigned long)start;
+	s = start;
	e = s + size;
	m = info->alignment - 1;

@@ -380,34 +383,34 @@ void *rh_detach_region(rh_info_t * info, void *start, int size)
	e = e & ~m;

	if (assure_empty(info, 1) < 0)
-		return ERR_PTR(-ENOMEM);
+		return (unsigned long) -ENOMEM;

	blk = NULL;
	list_for_each(l, &info->free_list) {
		blk = list_entry(l, rh_block_t, list);
		/* The range must lie entirely inside one free block */
-		bs = (unsigned long)blk->start;
-		be = (unsigned long)blk->start + blk->size;
+		bs = blk->start;
+		be = blk->start + blk->size;
		if (s >= bs && e <= be)
			break;
		blk = NULL;
	}

	if (blk == NULL)
-		return ERR_PTR(-ENOMEM);
+		return (unsigned long) -ENOMEM;

	/* Perfect fit */
	if (bs == s && be == e) {
		/* Delete from free list, release slot */
		list_del(&blk->list);
		release_slot(info, blk);
-		return (void *)s;
+		return s;
	}

	/* blk still in free list, with updated start and/or size */
	if (bs == s || be == e) {
		if (bs == s)
-			blk->start = (int8_t *)blk->start + size;
+			blk->start += size;
		blk->size -= size;

	} else {
@@ -416,25 +419,29 @@ void *rh_detach_region(rh_info_t * info, void *start, int size)

		/* the back free fragment */
		newblk = get_slot(info);
-		newblk->start = (void *)e;
+		newblk->start = e;
		newblk->size = be - e;

		list_add(&newblk->list, &blk->list);
	}

-	return (void *)s;
+	return s;
}

-void *rh_alloc_align(rh_info_t * info, int size, int alignment, const char *owner)
+/* Allocate a block of memory at the specified alignment.  The value returned
+ * is an offset into the buffer initialized by rh_init(), or a negative number
+ * if there is an error.
+ */
+unsigned long rh_alloc_align(rh_info_t * info, int size, int alignment, const char *owner)
{
	struct list_head *l;
	rh_block_t *blk;
	rh_block_t *newblk;
-	void *start;
+	unsigned long start;

-	/* Validate size, (must be power of two) */
+	/* Validate size, and alignment must be power of two */
	if (size <= 0 || (alignment & (alignment - 1)) != 0)
-		return ERR_PTR(-EINVAL);
+		return (unsigned long) -EINVAL;

	/* given alignment larger that default rheap alignment */
	if (alignment > info->alignment)
@@ -444,7 +451,7 @@ void *rh_alloc_align(rh_info_t * info, int size, int alignment, const char *owne
	size = (size + (info->alignment - 1)) & ~(info->alignment - 1);

	if (assure_empty(info, 1) < 0)
-		return ERR_PTR(-ENOMEM);
+		return (unsigned long) -ENOMEM;

	blk = NULL;
	list_for_each(l, &info->free_list) {
@@ -455,7 +462,7 @@ void *rh_alloc_align(rh_info_t * info, int size, int alignment, const char *owne
	}

	if (blk == NULL)
-		return ERR_PTR(-ENOMEM);
+		return (unsigned long) -ENOMEM;

	/* Just fits */
	if (blk->size == size) {
@@ -475,7 +482,7 @@ void *rh_alloc_align(rh_info_t * info, int size, int alignment, const char *owne
	newblk->owner = owner;

	/* blk still in free list, with updated start, size */
-	blk->start = (int8_t *)blk->start + size;
+	blk->start += size;
	blk->size -= size;

	start = newblk->start;
@@ -486,19 +493,25 @@ void *rh_alloc_align(rh_info_t * info, int size, int alignment, const char *owne
	/* this is no problem with the deallocator since */
	/* we scan for pointers that lie in the blocks   */
	if (alignment > info->alignment)
-		start = (void *)(((unsigned long)start + alignment - 1) &
-				~(alignment - 1));
+		start = (start + alignment - 1) & ~(alignment - 1);

	return start;
}

-void *rh_alloc(rh_info_t * info, int size, const char *owner)
+/* Allocate a block of memory at the default alignment.  The value returned is
+ * an offset into the buffer initialized by rh_init(), or a negative number if
+ * there is an error.
+ */
+unsigned long rh_alloc(rh_info_t * info, int size, const char *owner)
{
	return rh_alloc_align(info, size, info->alignment, owner);
}

-/* allocate at precisely the given address */
-void *rh_alloc_fixed(rh_info_t * info, void *start, int size, const char *owner)
+/* Allocate a block of memory at the given offset, rounded up to the default
+ * alignment.  The value returned is an offset into the buffer initialized by
+ * rh_init(), or a negative number if there is an error.
+ */
+unsigned long rh_alloc_fixed(rh_info_t * info, unsigned long start, int size, const char *owner)
{
	struct list_head *l;
	rh_block_t *blk, *newblk1, *newblk2;
@@ -506,10 +519,10 @@ void *rh_alloc_fixed(rh_info_t * info, void *start, int size, const char *owner)

	/* Validate size */
	if (size <= 0)
-		return ERR_PTR(-EINVAL);
+		return (unsigned long) -EINVAL;

	/* The region must be aligned */
-	s = (unsigned long)start;
+	s = start;
	e = s + size;
	m = info->alignment - 1;

@@ -520,20 +533,20 @@ void *rh_alloc_fixed(rh_info_t * info, void *start, int size, const char *owner)
	e = e & ~m;

	if (assure_empty(info, 2) < 0)
-		return ERR_PTR(-ENOMEM);
+		return (unsigned long) -ENOMEM;

	blk = NULL;
	list_for_each(l, &info->free_list) {
		blk = list_entry(l, rh_block_t, list);
		/* The range must lie entirely inside one free block */
-		bs = (unsigned long)blk->start;
-		be = (unsigned long)blk->start + blk->size;
+		bs = blk->start;
+		be = blk->start + blk->size;
		if (s >= bs && e <= be)
			break;
	}

	if (blk == NULL)
-		return ERR_PTR(-ENOMEM);
+		return (unsigned long) -ENOMEM;

	/* Perfect fit */
	if (bs == s && be == e) {
@@ -551,7 +564,7 @@ void *rh_alloc_fixed(rh_info_t * info, void *start, int size, const char *owner)
	/* blk still in free list, with updated start and/or size */
	if (bs == s || be == e) {
		if (bs == s)
-			blk->start = (int8_t *)blk->start + size;
+			blk->start += size;
		blk->size -= size;

	} else {
@@ -560,14 +573,14 @@ void *rh_alloc_fixed(rh_info_t * info, void *start, int size, const char *owner)

		/* The back free fragment */
		newblk2 = get_slot(info);
-		newblk2->start = (void *)e;
+		newblk2->start = e;
		newblk2->size = be - e;

		list_add(&newblk2->list, &blk->list);
	}

	newblk1 = get_slot(info);
-	newblk1->start = (void *)s;
+	newblk1->start = s;
	newblk1->size = e - s;
	newblk1->owner = owner;

@@ -577,7 +590,11 @@ void *rh_alloc_fixed(rh_info_t * info, void *start, int size, const char *owner)
	return start;
}

-int rh_free(rh_info_t * info, void *start)
+/* Deallocate the memory previously allocated by one of the rh_alloc functions.
+ * The return value is the size of the deallocated block, or a negative number
+ * if there is an error.
+ */
+int rh_free(rh_info_t * info, unsigned long start)
{
	rh_block_t *blk, *blk2;
	struct list_head *l;
@@ -642,7 +659,7 @@ int rh_get_stats(rh_info_t * info, int what, int max_stats, rh_stats_t * stats)
	return nr;
}

-int rh_set_owner(rh_info_t * info, void *start, const char *owner)
+int rh_set_owner(rh_info_t * info, unsigned long start, const char *owner)
{
	rh_block_t *blk, *blk2;
	struct list_head *l;
@@ -684,8 +701,8 @@ void rh_dump(rh_info_t * info)
		nr = maxnr;
	for (i = 0; i < nr; i++)
		printk(KERN_INFO
		       "    0x%p-0x%p (%u)\n",
		       st[i].start, (int8_t *) st[i].start + st[i].size,
		       "    0x%lx-0x%lx (%u)\n",
		       st[i].start, st[i].start + st[i].size,
		       st[i].size);
	printk(KERN_INFO "\n");

@@ -695,8 +712,8 @@ void rh_dump(rh_info_t * info)
		nr = maxnr;
	for (i = 0; i < nr; i++)
		printk(KERN_INFO
		       "    0x%p-0x%p (%u) %s\n",
		       st[i].start, (int8_t *) st[i].start + st[i].size,
		       "    0x%lx-0x%lx (%u) %s\n",
		       st[i].start, st[i].start + st[i].size,
		       st[i].size, st[i].owner != NULL ? st[i].owner : "");
	printk(KERN_INFO "\n");
}
@@ -704,6 +721,6 @@ void rh_dump(rh_info_t * info)
void rh_dump_blk(rh_info_t * info, rh_block_t * blk)
{
	printk(KERN_INFO
	       "blk @0x%p: 0x%p-0x%p (%u)\n",
	       blk, blk->start, (int8_t *) blk->start + blk->size, blk->size);
	       "blk @0x%p: 0x%lx-0x%lx (%u)\n",
	       blk, blk->start, blk->start + blk->size, blk->size);
}
+10 −10
@@ -330,7 +330,7 @@ void m8xx_cpm_dpinit(void)
	 * with the processor and the microcode patches applied / activated.
	 * But the following should be at least safe.
	 */
-	rh_attach_region(&cpm_dpmem_info, (void *)CPM_DATAONLY_BASE, CPM_DATAONLY_SIZE);
+	rh_attach_region(&cpm_dpmem_info, CPM_DATAONLY_BASE, CPM_DATAONLY_SIZE);
}

/*
@@ -338,9 +338,9 @@ void m8xx_cpm_dpinit(void)
 * This function returns an offset into the DPRAM area.
 * Use cpm_dpram_addr() to get the virtual address of the area.
 */
-uint cpm_dpalloc(uint size, uint align)
+unsigned long cpm_dpalloc(uint size, uint align)
{
-	void *start;
+	unsigned long start;
	unsigned long flags;

	spin_lock_irqsave(&cpm_dpmem_lock, flags);
@@ -352,30 +352,30 @@ uint cpm_dpalloc(uint size, uint align)
}
EXPORT_SYMBOL(cpm_dpalloc);

-int cpm_dpfree(uint offset)
+int cpm_dpfree(unsigned long offset)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&cpm_dpmem_lock, flags);
-	ret = rh_free(&cpm_dpmem_info, (void *)offset);
+	ret = rh_free(&cpm_dpmem_info, offset);
	spin_unlock_irqrestore(&cpm_dpmem_lock, flags);

	return ret;
}
EXPORT_SYMBOL(cpm_dpfree);

-uint cpm_dpalloc_fixed(uint offset, uint size, uint align)
+unsigned long cpm_dpalloc_fixed(unsigned long offset, uint size, uint align)
{
-	void *start;
+	unsigned long start;
	unsigned long flags;

	spin_lock_irqsave(&cpm_dpmem_lock, flags);
	cpm_dpmem_info.alignment = align;
-	start = rh_alloc_fixed(&cpm_dpmem_info, (void *)offset, size, "commproc");
+	start = rh_alloc_fixed(&cpm_dpmem_info, offset, size, "commproc");
	spin_unlock_irqrestore(&cpm_dpmem_lock, flags);

-	return (uint)start;
+	return start;
}
EXPORT_SYMBOL(cpm_dpalloc_fixed);

@@ -385,7 +385,7 @@ void cpm_dpdump(void)
}
EXPORT_SYMBOL(cpm_dpdump);

-void *cpm_dpram_addr(uint offset)
+void *cpm_dpram_addr(unsigned long offset)
{
	return (void *)(dpram_vbase + offset);
}
+10 −11
@@ -248,15 +248,14 @@ static void cpm2_dpinit(void)
	 * varies with the processor and the microcode patches activated.
	 * But the following should be at least safe.
	 */
-	rh_attach_region(&cpm_dpmem_info, (void *)CPM_DATAONLY_BASE,
-			CPM_DATAONLY_SIZE);
+	rh_attach_region(&cpm_dpmem_info, CPM_DATAONLY_BASE, CPM_DATAONLY_SIZE);
}

/* This function returns an index into the DPRAM area.
 */
-uint cpm_dpalloc(uint size, uint align)
+unsigned long cpm_dpalloc(uint size, uint align)
{
-	void *start;
+	unsigned long start;
	unsigned long flags;

	spin_lock_irqsave(&cpm_dpmem_lock, flags);
@@ -268,13 +267,13 @@ uint cpm_dpalloc(uint size, uint align)
}
EXPORT_SYMBOL(cpm_dpalloc);

-int cpm_dpfree(uint offset)
+int cpm_dpfree(unsigned long offset)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&cpm_dpmem_lock, flags);
-	ret = rh_free(&cpm_dpmem_info, (void *)offset);
+	ret = rh_free(&cpm_dpmem_info, offset);
	spin_unlock_irqrestore(&cpm_dpmem_lock, flags);

	return ret;
@@ -282,17 +281,17 @@ int cpm_dpfree(uint offset)
EXPORT_SYMBOL(cpm_dpfree);

/* not sure if this is ever needed */
-uint cpm_dpalloc_fixed(uint offset, uint size, uint align)
+unsigned long cpm_dpalloc_fixed(unsigned long offset, uint size, uint align)
{
-	void *start;
+	unsigned long start;
	unsigned long flags;

	spin_lock_irqsave(&cpm_dpmem_lock, flags);
	cpm_dpmem_info.alignment = align;
-	start = rh_alloc_fixed(&cpm_dpmem_info, (void *)offset, size, "commproc");
+	start = rh_alloc_fixed(&cpm_dpmem_info, offset, size, "commproc");
	spin_unlock_irqrestore(&cpm_dpmem_lock, flags);

-	return (uint)start;
+	return start;
}
EXPORT_SYMBOL(cpm_dpalloc_fixed);

@@ -302,7 +301,7 @@ void cpm_dpdump(void)
}
EXPORT_SYMBOL(cpm_dpdump);

-void *cpm_dpram_addr(uint offset)
+void *cpm_dpram_addr(unsigned long offset)
{
	return (void *)(im_dprambase + offset);
}
+14 −15
@@ -244,7 +244,7 @@ EXPORT_SYMBOL(qe_put_snum);
static int qe_sdma_init(void)
{
	struct sdma *sdma = &qe_immr->sdma;
-	u32 sdma_buf_offset;
+	unsigned long sdma_buf_offset;

	if (!sdma)
		return -ENODEV;
@@ -252,10 +252,10 @@ static int qe_sdma_init(void)
	/* allocate 2 internal temporary buffers (512 bytes size each) for
	 * the SDMA */
 	sdma_buf_offset = qe_muram_alloc(512 * 2, 4096);
-	if (IS_MURAM_ERR(sdma_buf_offset))
+	if (IS_ERR_VALUE(sdma_buf_offset))
		return -ENOMEM;

-	out_be32(&sdma->sdebcr, sdma_buf_offset & QE_SDEBCR_BA_MASK);
+	out_be32(&sdma->sdebcr, (u32) sdma_buf_offset & QE_SDEBCR_BA_MASK);
 	out_be32(&sdma->sdmr, (QE_SDMR_GLB_1_MSK |
 					(0x1 << QE_SDMR_CEN_SHIFT)));

@@ -291,33 +291,32 @@ static void qe_muram_init(void)
	if ((np = of_find_node_by_name(NULL, "data-only")) != NULL) {
		address = *of_get_address(np, 0, &size, &flags);
		of_node_put(np);
-		rh_attach_region(&qe_muram_info,
-			(void *)address, (int)size);
+		rh_attach_region(&qe_muram_info, address, (int) size);
	}
}

/* This function returns an index into the MURAM area.
 */
-u32 qe_muram_alloc(u32 size, u32 align)
+unsigned long qe_muram_alloc(int size, int align)
{
-	void *start;
+	unsigned long start;
	unsigned long flags;

	spin_lock_irqsave(&qe_muram_lock, flags);
	start = rh_alloc_align(&qe_muram_info, size, align, "QE");
	spin_unlock_irqrestore(&qe_muram_lock, flags);

-	return (u32) start;
+	return start;
}
EXPORT_SYMBOL(qe_muram_alloc);

-int qe_muram_free(u32 offset)
+int qe_muram_free(unsigned long offset)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&qe_muram_lock, flags);
-	ret = rh_free(&qe_muram_info, (void *)offset);
+	ret = rh_free(&qe_muram_info, offset);
	spin_unlock_irqrestore(&qe_muram_lock, flags);

	return ret;
@@ -325,16 +324,16 @@ int qe_muram_free(u32 offset)
EXPORT_SYMBOL(qe_muram_free);

/* not sure if this is ever needed */
-u32 qe_muram_alloc_fixed(u32 offset, u32 size)
+unsigned long qe_muram_alloc_fixed(unsigned long offset, int size)
{
-	void *start;
+	unsigned long start;
	unsigned long flags;

	spin_lock_irqsave(&qe_muram_lock, flags);
-	start = rh_alloc_fixed(&qe_muram_info, (void *)offset, size, "commproc");
+	start = rh_alloc_fixed(&qe_muram_info, offset, size, "commproc");
	spin_unlock_irqrestore(&qe_muram_lock, flags);

-	return (u32) start;
+	return start;
}
EXPORT_SYMBOL(qe_muram_alloc_fixed);

@@ -344,7 +343,7 @@ void qe_muram_dump(void)
}
EXPORT_SYMBOL(qe_muram_dump);

-void *qe_muram_addr(u32 offset)
+void *qe_muram_addr(unsigned long offset)
{
	return (void *)&qe_immr->muram[offset];
}
+3 −2
@@ -18,6 +18,7 @@
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/interrupt.h>
+#include <linux/err.h>

#include <asm/io.h>
#include <asm/immap_qe.h>
@@ -268,7 +269,7 @@ int ucc_fast_init(struct ucc_fast_info * uf_info, struct ucc_fast_private ** ucc
	/* Allocate memory for Tx Virtual Fifo */
	uccf->ucc_fast_tx_virtual_fifo_base_offset =
	    qe_muram_alloc(uf_info->utfs, UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT);
-	if (IS_MURAM_ERR(uccf->ucc_fast_tx_virtual_fifo_base_offset)) {
+	if (IS_ERR_VALUE(uccf->ucc_fast_tx_virtual_fifo_base_offset)) {
		printk(KERN_ERR "%s: cannot allocate MURAM for TX FIFO", __FUNCTION__);
		uccf->ucc_fast_tx_virtual_fifo_base_offset = 0;
		ucc_fast_free(uccf);
@@ -280,7 +281,7 @@ int ucc_fast_init(struct ucc_fast_info * uf_info, struct ucc_fast_private ** ucc
		qe_muram_alloc(uf_info->urfs +
			   UCC_FAST_RECEIVE_VIRTUAL_FIFO_SIZE_FUDGE_FACTOR,
			   UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT);
-	if (IS_MURAM_ERR(uccf->ucc_fast_rx_virtual_fifo_base_offset)) {
+	if (IS_ERR_VALUE(uccf->ucc_fast_rx_virtual_fifo_base_offset)) {
		printk(KERN_ERR "%s: cannot allocate MURAM for RX FIFO", __FUNCTION__);
		uccf->ucc_fast_rx_virtual_fifo_base_offset = 0;
		ucc_fast_free(uccf);