mm/dmapool.c (+142 −146)

@@ -77,6 +77,7 @@ show_pools (struct device *dev, struct device_attribute *attr, char *buf)
 	return PAGE_SIZE - size;
 }
 
 static DEVICE_ATTR(pools, S_IRUGO, show_pools, NULL);
 
 /**
@@ -100,8 +101,7 @@ static DEVICE_ATTR (pools, S_IRUGO, show_pools, NULL);
  * addressing restrictions on individual DMA transfers, such as not crossing
  * boundaries of 4KBytes.
  */
-struct dma_pool *
-dma_pool_create (const char *name, struct device *dev,
+struct dma_pool *dma_pool_create(const char *name, struct device *dev,
 				 size_t size, size_t align, size_t allocation)
 {
 	struct dma_pool *retval;
@@ -122,11 +122,13 @@ dma_pool_create (const char *name, struct device *dev,
 			allocation = size;
 		else
-			allocation = PAGE_SIZE;	// FIXME: round up for less fragmentation
+			allocation = PAGE_SIZE;
+		/* FIXME: round up for less fragmentation */
 	} else if (allocation < size)
 		return NULL;
 
-	if (!(retval = kmalloc_node (sizeof *retval, GFP_KERNEL, dev_to_node(dev))))
+	if (!(retval = kmalloc_node(sizeof *retval, GFP_KERNEL,
+				    dev_to_node(dev))))
 		return retval;
 
 	strlcpy(retval->name, name, sizeof retval->name);
@@ -161,10 +163,9 @@ dma_pool_create (const char *name, struct device *dev,
 	return retval;
 }
+EXPORT_SYMBOL(dma_pool_create);
 
-static struct dma_page *
-pool_alloc_page (struct dma_pool *pool, gfp_t mem_flags)
+static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
 {
 	struct dma_page *page;
 	int mapsize;
@@ -178,10 +179,9 @@ pool_alloc_page (struct dma_pool *pool, gfp_t mem_flags)
 		return NULL;
 	page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation,
-					&page->dma, mem_flags);
+					 &page->dma, mem_flags);
 	if (page->vaddr) {
-		memset (page->bitmap, 0xff, mapsize);	// bit set == free
+		memset(page->bitmap, 0xff, mapsize);	/* bit set == free */
 #ifdef	CONFIG_DEBUG_SLAB
 		memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
 #endif
@@ -194,9 +194,7 @@ pool_alloc_page (struct dma_pool *pool, gfp_t mem_flags)
 	return page;
 }
 
-static inline int
-is_page_busy (int blocks, unsigned long *bitmap)
+static inline int is_page_busy(int blocks, unsigned long *bitmap)
 {
 	while (blocks > 0) {
 		if (*bitmap++ != ~0UL)
@@ -206,8 +204,7 @@ is_page_busy (int blocks, unsigned long *bitmap)
 	return 0;
 }
 
-static void
-pool_free_page (struct dma_pool *pool, struct dma_page *page)
+static void pool_free_page(struct dma_pool *pool, struct dma_page *page)
 {
 	dma_addr_t dma = page->dma;
@@ -219,7 +216,6 @@ pool_free_page (struct dma_pool *pool, struct dma_page *page)
 	kfree(page);
 }
 
 /**
  * dma_pool_destroy - destroys a pool of dma memory blocks.
  * @pool: dma pool that will be destroyed
@@ -228,8 +224,7 @@ pool_free_page (struct dma_pool *pool, struct dma_page *page)
  * Caller guarantees that no more memory from the pool is in use,
  * and that nothing will try to use the pool after this call.
  */
-void
-dma_pool_destroy (struct dma_pool *pool)
+void dma_pool_destroy(struct dma_pool *pool)
 {
 	mutex_lock(&pools_lock);
 	list_del(&pool->pools);
@@ -243,10 +238,12 @@ dma_pool_destroy (struct dma_pool *pool)
 						struct dma_page, page_list);
 		if (is_page_busy(pool->blocks_per_page, page->bitmap)) {
 			if (pool->dev)
-				dev_err(pool->dev, "dma_pool_destroy %s, %p busy\n",
+				dev_err(pool->dev,
+					"dma_pool_destroy %s, %p busy\n",
 					pool->name, page->vaddr);
 			else
-				printk (KERN_ERR "dma_pool_destroy %s, %p busy\n",
+				printk(KERN_ERR
+				       "dma_pool_destroy %s, %p busy\n",
 				       pool->name, page->vaddr);
 			/* leak the still-in-use consistent memory */
 			list_del(&page->page_list);
@@ -257,7 +254,7 @@ dma_pool_destroy (struct dma_pool *pool)
 	kfree(pool);
 }
+EXPORT_SYMBOL(dma_pool_destroy);
 
 /**
  * dma_pool_alloc - get a block of consistent memory
@@ -269,8 +266,8 @@ dma_pool_destroy (struct dma_pool *pool)
  * and reports its dma address through the handle.
  * If such a memory block can't be allocated, null is returned.
  */
-void *
-dma_pool_alloc (struct dma_pool *pool, gfp_t mem_flags, dma_addr_t *handle)
+void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
+		     dma_addr_t *handle)
 {
 	unsigned long flags;
 	struct dma_page *page;
@@ -284,8 +281,7 @@ dma_pool_alloc (struct dma_pool *pool, gfp_t mem_flags, dma_addr_t *handle)
 		int i;
 		/* only cachable accesses here ... */
 		for (map = 0, i = 0;
-				i < pool->blocks_per_page;
-				i += BITS_PER_LONG, map++) {
+		     i < pool->blocks_per_page; i += BITS_PER_LONG, map++) {
 			if (page->bitmap[map] == 0)
 				continue;
 			block = ffz(~page->bitmap[map]);
@@ -297,7 +293,8 @@ dma_pool_alloc (struct dma_pool *pool, gfp_t mem_flags, dma_addr_t *handle)
 			}
 		}
 	}
-	if (!(page = pool_alloc_page (pool, GFP_ATOMIC))) {
+	page = pool_alloc_page(pool, GFP_ATOMIC);
+	if (!page) {
 		if (mem_flags & __GFP_WAIT) {
 			DECLARE_WAITQUEUE(wait, current);
@@ -327,10 +324,9 @@ dma_pool_alloc (struct dma_pool *pool, gfp_t mem_flags, dma_addr_t *handle)
 	spin_unlock_irqrestore(&pool->lock, flags);
 	return retval;
 }
+EXPORT_SYMBOL(dma_pool_alloc);
 
-static struct dma_page *
-pool_find_page (struct dma_pool *pool, dma_addr_t dma)
+static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma)
 {
 	unsigned long flags;
 	struct dma_page *page;
@@ -348,7 +344,6 @@ pool_find_page (struct dma_pool *pool, dma_addr_t dma)
 	return page;
 }
 
 /**
  * dma_pool_free - put block back into dma pool
  * @pool: the dma pool holding the block
@@ -358,16 +353,17 @@ pool_find_page (struct dma_pool *pool, dma_addr_t dma)
  * Caller promises neither device nor driver will again touch this block
  * unless it is first re-allocated.
  */
-void
-dma_pool_free (struct dma_pool *pool, void *vaddr, dma_addr_t dma)
+void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
 {
 	struct dma_page *page;
 	unsigned long flags;
 	int map, block;
 
-	if ((page = pool_find_page(pool, dma)) == NULL) {
+	page = pool_find_page(pool, dma);
+	if (!page) {
 		if (pool->dev)
-			dev_err(pool->dev, "dma_pool_free %s, %p/%lx (bad dma)\n",
+			dev_err(pool->dev,
+				"dma_pool_free %s, %p/%lx (bad dma)\n",
 				pool->name, vaddr, (unsigned long)dma);
 		else
 			printk(KERN_ERR "dma_pool_free %s, %p/%lx (bad dma)\n",
@@ -383,19 +379,23 @@ dma_pool_free (struct dma_pool *pool, void *vaddr, dma_addr_t dma)
 #ifdef	CONFIG_DEBUG_SLAB
 	if (((dma - page->dma) + (void *)page->vaddr) != vaddr) {
 		if (pool->dev)
-			dev_err(pool->dev, "dma_pool_free %s, %p (bad vaddr)/%Lx\n",
+			dev_err(pool->dev,
+				"dma_pool_free %s, %p (bad vaddr)/%Lx\n",
 				pool->name, vaddr, (unsigned long long)dma);
 		else
-			printk (KERN_ERR "dma_pool_free %s, %p (bad vaddr)/%Lx\n",
+			printk(KERN_ERR
+			       "dma_pool_free %s, %p (bad vaddr)/%Lx\n",
 			       pool->name, vaddr, (unsigned long long)dma);
 		return;
 	}
 	if (page->bitmap[map] & (1UL << block)) {
 		if (pool->dev)
-			dev_err(pool->dev, "dma_pool_free %s, dma %Lx already free\n",
+			dev_err(pool->dev,
+				"dma_pool_free %s, dma %Lx already free\n",
 				pool->name, (unsigned long long)dma);
 		else
-			printk (KERN_ERR "dma_pool_free %s, dma %Lx already free\n",
+			printk(KERN_ERR
+			       "dma_pool_free %s, dma %Lx already free\n",
 			       pool->name, (unsigned long long)dma);
 		return;
 	}
@@ -414,6 +414,7 @@ dma_pool_free (struct dma_pool *pool, void *vaddr, dma_addr_t dma)
 	 */
 	spin_unlock_irqrestore(&pool->lock, flags);
 }
+EXPORT_SYMBOL(dma_pool_free);
 
 /*
  * Managed DMA pool
@@ -458,6 +459,7 @@ struct dma_pool *dmam_pool_create(const char *name, struct device *dev,
 	return pool;
 }
+EXPORT_SYMBOL(dmam_pool_create);
 
 /**
  * dmam_pool_destroy - Managed dma_pool_destroy()
@@ -472,10 +474,4 @@ void dmam_pool_destroy(struct dma_pool *pool)
 	dma_pool_destroy(pool);
 	WARN_ON(devres_destroy(dev, dmam_pool_release, dmam_pool_match, pool));
 }
-EXPORT_SYMBOL (dma_pool_create);
-EXPORT_SYMBOL (dma_pool_destroy);
-EXPORT_SYMBOL (dma_pool_alloc);
-EXPORT_SYMBOL (dma_pool_free);
-EXPORT_SYMBOL (dmam_pool_create);
 EXPORT_SYMBOL(dmam_pool_destroy);
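
Usage note (not part of the patch): the exported interface touched above — dma_pool_create(), dma_pool_alloc(), dma_pool_free(), dma_pool_destroy() — is driven from device drivers roughly as in the minimal sketch below. The descriptor struct, pool name, and setup function are illustrative assumptions, not code from mm/dmapool.c.

/*
 * Hypothetical driver-side sketch of the dma_pool API changed in this patch.
 * 'struct my_desc', 'desc_pool' and 'my_driver_setup' are made up for
 * illustration only.
 */
#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/types.h>

struct my_desc {			/* illustrative fixed-size DMA descriptor */
	u32 status;
	u32 buf_addr;
};

static struct dma_pool *desc_pool;

static int my_driver_setup(struct device *dev)
{
	struct my_desc *desc;
	dma_addr_t handle;

	/* blocks of sizeof(struct my_desc), 16-byte aligned, default allocation size */
	desc_pool = dma_pool_create("my_desc", dev, sizeof(struct my_desc), 16, 0);
	if (!desc_pool)
		return -ENOMEM;

	/* GFP_KERNEL may sleep (the __GFP_WAIT path above); 'handle' is the bus address */
	desc = dma_pool_alloc(desc_pool, GFP_KERNEL, &handle);
	if (!desc) {
		dma_pool_destroy(desc_pool);
		return -ENOMEM;
	}

	/* ... program the device with 'handle', touch 'desc' from the CPU ... */

	dma_pool_free(desc_pool, desc, handle);
	dma_pool_destroy(desc_pool);
	return 0;
}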