
Commit e87aa773 authored by Matthew Wilcox

dmapool: Fix style problems



Run Lindent and fix all issues reported by checkpatch.pl

Signed-off-by: Matthew Wilcox <willy@linux.intel.com>
parent 141e9d4b
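
The commit message refers to two tools that ship in the kernel tree under scripts/. A cleanup like this one would typically be produced and re-checked with invocations along these lines (illustrative only; "path/to/dmapool.c" stands in for whatever path the file had at the time of this commit):

    scripts/Lindent path/to/dmapool.c
    scripts/checkpatch.pl --file path/to/dmapool.c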
+142 −146
@@ -77,6 +77,7 @@ show_pools (struct device *dev, struct device_attribute *attr, char *buf)
 
 	return PAGE_SIZE - size;
 }
+
 static DEVICE_ATTR(pools, S_IRUGO, show_pools, NULL);
 
 /**
@@ -100,8 +101,7 @@ static DEVICE_ATTR (pools, S_IRUGO, show_pools, NULL);
  * addressing restrictions on individual DMA transfers, such as not crossing
  * boundaries of 4KBytes.
  */
-struct dma_pool *
-dma_pool_create (const char *name, struct device *dev,
+struct dma_pool *dma_pool_create(const char *name, struct device *dev,
 				 size_t size, size_t align, size_t allocation)
 {
 	struct dma_pool *retval;
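
(Aside: a minimal, hypothetical caller of the constructor whose declaration this hunk reflows. The device pointer, pool name, block size, and alignment below are made up for illustration; passing 0 as the last argument lets dmapool choose the allocation size itself.)

#include <linux/dmapool.h>
#include <linux/device.h>
#include <linux/errno.h>

static struct dma_pool *example_pool;	/* hypothetical driver-global pool */

static int example_setup(struct device *dev)
{
	/* 64-byte blocks, 16-byte aligned, default allocation size */
	example_pool = dma_pool_create("example_pool", dev, 64, 16, 0);
	if (!example_pool)
		return -ENOMEM;
	return 0;
}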
@@ -122,11 +122,13 @@ dma_pool_create (const char *name, struct device *dev,
 			allocation = size;
 		else
 			allocation = PAGE_SIZE;
-		// FIXME: round up for less fragmentation
+		/* FIXME: round up for less fragmentation */
 	} else if (allocation < size)
 		return NULL;
 
-	if (!(retval = kmalloc_node (sizeof *retval, GFP_KERNEL, dev_to_node(dev))))
+	if (!
+	    (retval =
+	     kmalloc_node(sizeof *retval, GFP_KERNEL, dev_to_node(dev))))
 		return retval;
 
 	strlcpy(retval->name, name, sizeof retval->name);
@@ -161,10 +163,9 @@ dma_pool_create (const char *name, struct device *dev,
 
 	return retval;
 }
+EXPORT_SYMBOL(dma_pool_create);
 
-
-static struct dma_page *
-pool_alloc_page (struct dma_pool *pool, gfp_t mem_flags)
+static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
 {
 	struct dma_page *page;
 	int mapsize;
@@ -178,10 +179,9 @@ pool_alloc_page (struct dma_pool *pool, gfp_t mem_flags)
 		return NULL;
 	page->vaddr = dma_alloc_coherent(pool->dev,
 					 pool->allocation,
-					    &page->dma,
-					    mem_flags);
+					 &page->dma, mem_flags);
 	if (page->vaddr) {
-		memset (page->bitmap, 0xff, mapsize);	// bit set == free
+		memset(page->bitmap, 0xff, mapsize);	/* bit set == free */
 #ifdef	CONFIG_DEBUG_SLAB
 		memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
 #endif
@@ -194,9 +194,7 @@ pool_alloc_page (struct dma_pool *pool, gfp_t mem_flags)
 	return page;
 }
 
-
-static inline int
-is_page_busy (int blocks, unsigned long *bitmap)
+static inline int is_page_busy(int blocks, unsigned long *bitmap)
 {
 	while (blocks > 0) {
 		if (*bitmap++ != ~0UL)
@@ -206,8 +204,7 @@ is_page_busy (int blocks, unsigned long *bitmap)
 	return 0;
 }
 
-static void
-pool_free_page (struct dma_pool *pool, struct dma_page *page)
+static void pool_free_page(struct dma_pool *pool, struct dma_page *page)
 {
 	dma_addr_t dma = page->dma;
 
@@ -219,7 +216,6 @@ pool_free_page (struct dma_pool *pool, struct dma_page *page)
 	kfree(page);
 }
 
-
 /**
  * dma_pool_destroy - destroys a pool of dma memory blocks.
  * @pool: dma pool that will be destroyed
@@ -228,8 +224,7 @@ pool_free_page (struct dma_pool *pool, struct dma_page *page)
  * Caller guarantees that no more memory from the pool is in use,
  * and that nothing will try to use the pool after this call.
  */
-void
-dma_pool_destroy (struct dma_pool *pool)
+void dma_pool_destroy(struct dma_pool *pool)
 {
 	mutex_lock(&pools_lock);
 	list_del(&pool->pools);
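
(Aside: continuing the hypothetical sketch from the dma_pool_create() hunk above, teardown honours the guarantee the comment describes: every block must already have been returned with dma_pool_free(), otherwise dma_pool_destroy() emits the "busy" messages handled in the next hunk.)

static void example_teardown(void)
{
	/* all blocks have been freed back to the pool by this point */
	dma_pool_destroy(example_pool);
	example_pool = NULL;
}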
@@ -243,10 +238,12 @@ dma_pool_destroy (struct dma_pool *pool)
 				  struct dma_page, page_list);
 		if (is_page_busy(pool->blocks_per_page, page->bitmap)) {
 			if (pool->dev)
-				dev_err(pool->dev, "dma_pool_destroy %s, %p busy\n",
+				dev_err(pool->dev,
+					"dma_pool_destroy %s, %p busy\n",
 					pool->name, page->vaddr);
 			else
-				printk (KERN_ERR "dma_pool_destroy %s, %p busy\n",
+				printk(KERN_ERR
+				       "dma_pool_destroy %s, %p busy\n",
 				       pool->name, page->vaddr);
 			/* leak the still-in-use consistent memory */
 			list_del(&page->page_list);
@@ -257,7 +254,7 @@ dma_pool_destroy (struct dma_pool *pool)
 
 	kfree(pool);
 }
-
+EXPORT_SYMBOL(dma_pool_destroy);
 
 /**
  * dma_pool_alloc - get a block of consistent memory
@@ -269,8 +266,8 @@ dma_pool_destroy (struct dma_pool *pool)
  * and reports its dma address through the handle.
  * If such a memory block can't be allocated, null is returned.
  */
-void *
-dma_pool_alloc (struct dma_pool *pool, gfp_t mem_flags, dma_addr_t *handle)
+void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
+		     dma_addr_t *handle)
 {
 	unsigned long flags;
 	struct dma_page *page;
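
(Aside: a hypothetical allocation against the signature above, continuing the earlier sketch. The return value is the CPU-visible address of the block; the bus address the device should be given comes back through the handle argument.)

static void *example_alloc(dma_addr_t *dma_handle)
{
	/* may sleep with GFP_KERNEL; use GFP_ATOMIC from atomic context */
	return dma_pool_alloc(example_pool, GFP_KERNEL, dma_handle);
}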
@@ -284,8 +281,7 @@ dma_pool_alloc (struct dma_pool *pool, gfp_t mem_flags, dma_addr_t *handle)
 		int i;
 		/* only cachable accesses here ... */
 		for (map = 0, i = 0;
-				i < pool->blocks_per_page;
-				i += BITS_PER_LONG, map++) {
+		     i < pool->blocks_per_page; i += BITS_PER_LONG, map++) {
 			if (page->bitmap[map] == 0)
 				continue;
 			block = ffz(~page->bitmap[map]);
@@ -297,7 +293,8 @@ dma_pool_alloc (struct dma_pool *pool, gfp_t mem_flags, dma_addr_t *handle)
 			}
 		}
 	}
-	if (!(page = pool_alloc_page (pool, GFP_ATOMIC))) {
+	page = pool_alloc_page(pool, GFP_ATOMIC);
+	if (!page) {
 		if (mem_flags & __GFP_WAIT) {
 			DECLARE_WAITQUEUE(wait, current);
 
@@ -327,10 +324,9 @@ dma_pool_alloc (struct dma_pool *pool, gfp_t mem_flags, dma_addr_t *handle)
 	spin_unlock_irqrestore(&pool->lock, flags);
 	return retval;
 }
+EXPORT_SYMBOL(dma_pool_alloc);
 
-
-static struct dma_page *
-pool_find_page (struct dma_pool *pool, dma_addr_t dma)
+static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma)
 {
 	unsigned long flags;
 	struct dma_page *page;
@@ -348,7 +344,6 @@ pool_find_page (struct dma_pool *pool, dma_addr_t dma)
 	return page;
 }
 
-
 /**
  * dma_pool_free - put block back into dma pool
  * @pool: the dma pool holding the block
@@ -358,16 +353,17 @@ pool_find_page (struct dma_pool *pool, dma_addr_t dma)
  * Caller promises neither device nor driver will again touch this block
  * unless it is first re-allocated.
  */
-void
-dma_pool_free (struct dma_pool *pool, void *vaddr, dma_addr_t dma)
+void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
 {
 	struct dma_page *page;
 	unsigned long flags;
 	int map, block;
 
-	if ((page = pool_find_page(pool, dma)) == NULL) {
+	page = pool_find_page(pool, dma);
+	if (!page) {
 		if (pool->dev)
-			dev_err(pool->dev, "dma_pool_free %s, %p/%lx (bad dma)\n",
+			dev_err(pool->dev,
+				"dma_pool_free %s, %p/%lx (bad dma)\n",
 				pool->name, vaddr, (unsigned long)dma);
 		else
 			printk(KERN_ERR "dma_pool_free %s, %p/%lx (bad dma)\n",
@@ -383,19 +379,23 @@ dma_pool_free (struct dma_pool *pool, void *vaddr, dma_addr_t dma)
 #ifdef	CONFIG_DEBUG_SLAB
 	if (((dma - page->dma) + (void *)page->vaddr) != vaddr) {
 		if (pool->dev)
-			dev_err(pool->dev, "dma_pool_free %s, %p (bad vaddr)/%Lx\n",
+			dev_err(pool->dev,
+				"dma_pool_free %s, %p (bad vaddr)/%Lx\n",
 				pool->name, vaddr, (unsigned long long)dma);
 		else
-			printk (KERN_ERR "dma_pool_free %s, %p (bad vaddr)/%Lx\n",
+			printk(KERN_ERR
+			       "dma_pool_free %s, %p (bad vaddr)/%Lx\n",
 			       pool->name, vaddr, (unsigned long long)dma);
 		return;
 	}
 	if (page->bitmap[map] & (1UL << block)) {
 		if (pool->dev)
-			dev_err(pool->dev, "dma_pool_free %s, dma %Lx already free\n",
+			dev_err(pool->dev,
+				"dma_pool_free %s, dma %Lx already free\n",
 				pool->name, (unsigned long long)dma);
 		else
-			printk (KERN_ERR "dma_pool_free %s, dma %Lx already free\n",
+			printk(KERN_ERR
+			       "dma_pool_free %s, dma %Lx already free\n",
 			       pool->name, (unsigned long long)dma);
 		return;
 	}
@@ -414,6 +414,7 @@ dma_pool_free (struct dma_pool *pool, void *vaddr, dma_addr_t dma)
 	 */
 	spin_unlock_irqrestore(&pool->lock, flags);
 }
+EXPORT_SYMBOL(dma_pool_free);
 
 /*
  * Managed DMA pool
@@ -458,6 +459,7 @@ struct dma_pool *dmam_pool_create(const char *name, struct device *dev,
 
 	return pool;
 }
+EXPORT_SYMBOL(dmam_pool_create);
 
 /**
  * dmam_pool_destroy - Managed dma_pool_destroy()
@@ -472,10 +474,4 @@ void dmam_pool_destroy(struct dma_pool *pool)
 	dma_pool_destroy(pool);
 	WARN_ON(devres_destroy(dev, dmam_pool_release, dmam_pool_match, pool));
 }
-
-EXPORT_SYMBOL (dma_pool_create);
-EXPORT_SYMBOL (dma_pool_destroy);
-EXPORT_SYMBOL (dma_pool_alloc);
-EXPORT_SYMBOL (dma_pool_free);
-EXPORT_SYMBOL (dmam_pool_create);
 EXPORT_SYMBOL(dmam_pool_destroy);
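
(Aside: a hypothetical use of the managed variant exported by the last two hunks. A pool obtained from dmam_pool_create() is released automatically when the device is unbound, so dmam_pool_destroy() is only needed when a driver wants to tear it down early; the probe function and names below are made up for illustration.)

#include <linux/dmapool.h>
#include <linux/device.h>
#include <linux/errno.h>

static int example_probe(struct device *dev)
{
	struct dma_pool *pool;

	/* lifetime tied to dev via devres; no explicit destroy needed */
	pool = dmam_pool_create("example_dmam_pool", dev, 64, 16, 0);
	if (!pool)
		return -ENOMEM;

	/* ... use dma_pool_alloc()/dma_pool_free() on "pool" as usual ... */
	return 0;
}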