arch/arm/common/dmabounce.c  (+86 −79)

@@ -33,8 +33,8 @@
 #include <asm/cacheflush.h>
 
 #undef DEBUG
-
 #undef STATS
+
 #ifdef STATS
 #define DO_STATS(X) do { X ; } while (0)
 #else
@@ -52,26 +52,31 @@ struct safe_buffer {
 	int		direction;
 
 	/* safe buffer info */
-	struct dma_pool *pool;
+	struct dmabounce_pool *pool;
 	void		*safe;
 	dma_addr_t	safe_dma_addr;
 };
 
+struct dmabounce_pool {
+	unsigned long	size;
+	struct dma_pool	*pool;
+#ifdef STATS
+	unsigned long	allocs;
+#endif
+};
+
 struct dmabounce_device_info {
 	struct list_head node;
 
 	struct device *dev;
-	struct dma_pool *small_buffer_pool;
-	struct dma_pool *large_buffer_pool;
 	struct list_head safe_buffers;
-	unsigned long small_buffer_size, large_buffer_size;
 #ifdef STATS
-	unsigned long sbp_allocs;
-	unsigned long lbp_allocs;
 	unsigned long total_allocs;
 	unsigned long map_op_count;
 	unsigned long bounce_count;
 #endif
+	struct dmabounce_pool	small;
+	struct dmabounce_pool	large;
 };
 
 static LIST_HEAD(dmabounce_devs);
@@ -82,9 +87,9 @@ static void print_alloc_stats(struct dmabounce_device_info *device_info)
 	printk(KERN_INFO
 		"%s: dmabounce: sbp: %lu, lbp: %lu, other: %lu, total: %lu\n",
 		device_info->dev->bus_id,
-		device_info->sbp_allocs, device_info->lbp_allocs,
-		device_info->total_allocs - device_info->sbp_allocs -
-			device_info->lbp_allocs,
+		device_info->small.allocs, device_info->large.allocs,
+		device_info->total_allocs - device_info->small.allocs -
+			device_info->large.allocs,
 		device_info->total_allocs);
 }
 #endif
@@ -109,15 +114,19 @@ alloc_safe_buffer(struct dmabounce_device_info *device_info, void *ptr,
 		   size_t size, enum dma_data_direction dir)
 {
 	struct safe_buffer *buf;
-	struct dma_pool *pool;
+	struct dmabounce_pool *pool;
 	struct device *dev = device_info->dev;
-	void *safe;
-	dma_addr_t safe_dma_addr;
 
 	dev_dbg(dev, "%s(ptr=%p, size=%d, dir=%d)\n",
 		__func__, ptr, size, dir);
 
-	DO_STATS ( device_info->total_allocs++ );
+	if (size <= device_info->small.size) {
+		pool = &device_info->small;
+	} else if (size <= device_info->large.size) {
+		pool = &device_info->large;
+	} else {
+		pool = NULL;
+	}
 
 	buf = kmalloc(sizeof(struct safe_buffer), GFP_ATOMIC);
 	if (buf == NULL) {
@@ -125,23 +134,21 @@ alloc_safe_buffer(struct dmabounce_device_info *device_info, void *ptr,
 		return NULL;
 	}
 
-	if (size <= device_info->small_buffer_size) {
-		pool = device_info->small_buffer_pool;
-		safe = dma_pool_alloc(pool, GFP_ATOMIC, &safe_dma_addr);
-
-		DO_STATS ( device_info->sbp_allocs++ );
-	} else if (size <= device_info->large_buffer_size) {
-		pool = device_info->large_buffer_pool;
-		safe = dma_pool_alloc(pool, GFP_ATOMIC, &safe_dma_addr);
-
-		DO_STATS ( device_info->lbp_allocs++ );
+	buf->ptr = ptr;
+	buf->size = size;
+	buf->direction = dir;
+	buf->pool = pool;
+
+	if (pool) {
+		buf->safe = dma_pool_alloc(pool->pool, GFP_ATOMIC, &buf->safe_dma_addr);
 	} else {
-		pool = NULL;
-		safe = dma_alloc_coherent(dev, size, &safe_dma_addr,
-					  GFP_ATOMIC);
+		buf->safe = dma_alloc_coherent(dev, size, &buf->safe_dma_addr,
+					       GFP_ATOMIC);
 	}
 
-	if (safe == NULL) {
-		dev_warn(device_info->dev,
+	if (buf->safe == NULL) {
+		dev_warn(dev,
 			"%s: could not alloc dma memory (size=%d)\n",
 			__func__, size);
 		kfree(buf);
@@ -149,17 +156,13 @@ alloc_safe_buffer(struct dmabounce_device_info *device_info, void *ptr,
 	}
 
 #ifdef STATS
+	if (pool)
+		pool->allocs++;
+	device_info->total_allocs++;
 	if (device_info->total_allocs % 1000 == 0)
 		print_alloc_stats(device_info);
 #endif
 
-	buf->ptr = ptr;
-	buf->size = size;
-	buf->direction = dir;
-	buf->pool = pool;
-	buf->safe = safe;
-	buf->safe_dma_addr = safe_dma_addr;
-
 	list_add(&buf->node, &device_info->safe_buffers);
 
 	return buf;
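The reworked allocator above leans on the generic dma_pool API. For context, a minimal sketch of that lifecycle, in hypothetical driver code that is not part of this patch:

/* Hypothetical driver snippet (not from this patch): the dma_pool calls
 * used by alloc_safe_buffer()/free_safe_buffer() above. */
#include <linux/dmapool.h>

static int example_use_pool(struct device *dev)
{
	struct dma_pool *pool;
	dma_addr_t dma;
	void *vaddr;

	/* name, device, object size, alignment (0 = byte), boundary (0 = none) */
	pool = dma_pool_create("example_pool", dev, 512, 0, 0);
	if (!pool)
		return -ENOMEM;

	/* returns both a CPU pointer and the bus address seen by the device */
	vaddr = dma_pool_alloc(pool, GFP_ATOMIC, &dma);
	if (!vaddr) {
		dma_pool_destroy(pool);
		return -ENOMEM;
	}

	/* ... program the device with 'dma', touch the buffer via 'vaddr' ... */

	dma_pool_free(pool, vaddr, dma);
	dma_pool_destroy(pool);
	return 0;
}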
@@ -186,7 +189,7 @@ free_safe_buffer(struct dmabounce_device_info *device_info, struct safe_buffer *
 	list_del(&buf->node);
 
 	if (buf->pool)
-		dma_pool_free(buf->pool, buf->safe, buf->safe_dma_addr);
+		dma_pool_free(buf->pool->pool, buf->safe, buf->safe_dma_addr);
 	else
 		dma_free_coherent(device_info->dev, buf->size, buf->safe,
 				    buf->safe_dma_addr);
@@ -197,12 +200,10 @@ free_safe_buffer(struct dmabounce_device_info *device_info, struct safe_buffer *
 /* ************************************************** */
 
 #ifdef STATS
-
 static void print_map_stats(struct dmabounce_device_info *device_info)
 {
-	printk(KERN_INFO
-		"%s: dmabounce: map_op_count=%lu, bounce_count=%lu\n",
-		device_info->dev->bus_id,
+	dev_info(device_info->dev,
+		"dmabounce: map_op_count=%lu, bounce_count=%lu\n",
 		device_info->map_op_count, device_info->bounce_count);
 }
 #endif
@@ -258,13 +259,13 @@ map_single(struct device *dev, void *ptr, size_t size,
 				__func__, ptr, buf->safe, size);
 			memcpy(buf->safe, ptr, size);
 		}
-		consistent_sync(buf->safe, size, dir);
+		ptr = buf->safe;
 		dma_addr = buf->safe_dma_addr;
-	} else {
-		consistent_sync(ptr, size, dir);
 	}
 
+	consistent_sync(ptr, size, dir);
+
 	return dma_addr;
 }
@@ -278,7 +279,7 @@ unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
 	/*
 	 * Trying to unmap an invalid mapping
 	 */
-	if (dma_addr == ~0) {
+	if (dma_mapping_error(dma_addr)) {
 		dev_err(dev, "Trying to unmap invalid mapping\n");
 		return;
 	}
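The open-coded `dma_addr == ~0` test in unmap_single() becomes dma_mapping_error(), matching how callers are expected to validate the cookie returned by dma_map_single(). A hedged sketch of that caller-side pattern, using this tree's single-argument dma_mapping_error() (hypothetical code, not from the patch):

/* Hypothetical caller (not from this patch): never hand a failed mapping
 * cookie to the hardware or to dma_unmap_single(). */
static int example_map(struct device *dev, void *buf, size_t len)
{
	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	if (dma_mapping_error(dma))
		return -ENOMEM;

	/* ... start DMA using 'dma' ... */

	dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
	return 0;
}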
@@ -570,11 +571,25 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents,
 	local_irq_restore(flags);
 }
 
+static int
+dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev,
+		    const char *name, unsigned long size)
+{
+	pool->size = size;
+	DO_STATS(pool->allocs = 0);
+	pool->pool = dma_pool_create(name, dev, size,
+				     0 /* byte alignment */,
+				     0 /* no page-crossing issues */);
+
+	return pool->pool ? 0 : -ENOMEM;
+}
+
 int
 dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
 			unsigned long large_buffer_size)
 {
 	struct dmabounce_device_info *device_info;
+	int ret;
 
 	device_info = kmalloc(sizeof(struct dmabounce_device_info), GFP_ATOMIC);
 	if (!device_info) {
@@ -584,45 +599,31 @@ dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
 		return -ENOMEM;
 	}
 
-	device_info->small_buffer_pool =
-		dma_pool_create("small_dmabounce_pool",
-				dev,
-				small_buffer_size,
-				0 /* byte alignment */,
-				0 /* no page-crossing issues */);
-	if (!device_info->small_buffer_pool) {
-		printk(KERN_ERR
-			"dmabounce: could not allocate small DMA pool for %s\n",
-			dev->bus_id);
-		kfree(device_info);
-		return -ENOMEM;
+	ret = dmabounce_init_pool(&device_info->small, dev,
+				  "small_dmabounce_pool", small_buffer_size);
+	if (ret) {
+		dev_err(dev,
+			"dmabounce: could not allocate DMA pool for %ld byte objects\n",
+			small_buffer_size);
+		goto err_free;
 	}
 
 	if (large_buffer_size) {
-		device_info->large_buffer_pool =
-			dma_pool_create("large_dmabounce_pool",
-					dev,
-					large_buffer_size,
-					0 /* byte alignment */,
-					0 /* no page-crossing issues */);
-		if (!device_info->large_buffer_pool) {
-			printk(KERN_ERR
-				"dmabounce: could not allocate large DMA pool for %s\n",
-				dev->bus_id);
-			dma_pool_destroy(device_info->small_buffer_pool);
-			return -ENOMEM;
+		ret = dmabounce_init_pool(&device_info->large, dev,
+					  "large_dmabounce_pool",
+					  large_buffer_size);
+		if (ret) {
+			dev_err(dev,
+				"dmabounce: could not allocate DMA pool for %ld byte objects\n",
+				large_buffer_size);
+			goto err_destroy;
 		}
 	}
 
 	device_info->dev = dev;
-	device_info->small_buffer_size = small_buffer_size;
-	device_info->large_buffer_size = large_buffer_size;
 	INIT_LIST_HEAD(&device_info->safe_buffers);
 
 #ifdef STATS
-	device_info->sbp_allocs = 0;
-	device_info->lbp_allocs = 0;
 	device_info->total_allocs = 0;
 	device_info->map_op_count = 0;
 	device_info->bounce_count = 0;
@@ -634,6 +635,12 @@ dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
 		dev->bus_id, dev->bus->name);
 
 	return 0;
+
+ err_destroy:
+	dma_pool_destroy(device_info->small.pool);
+ err_free:
+	kfree(device_info);
+	return ret;
 }
@@ -655,10 +662,10 @@ dmabounce_unregister_dev(struct device *dev)
 		BUG();
 	}
 
-	if (device_info->small_buffer_pool)
-		dma_pool_destroy(device_info->small_buffer_pool);
-	if (device_info->large_buffer_pool)
-		dma_pool_destroy(device_info->large_buffer_pool);
+	if (device_info->small.pool)
+		dma_pool_destroy(device_info->small.pool);
+	if (device_info->large.pool)
+		dma_pool_destroy(device_info->large.pool);
 
 #ifdef STATS
 	print_alloc_stats(device_info);
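For context, a bus or platform driver opts a device into bouncing through the two exported entry points whose setup path is reworked above. A minimal sketch, with hypothetical hook names and pool sizes (not part of this patch):

/* Hypothetical platform hooks (not from this patch): create a 512-byte
 * small pool and a 4096-byte large pool for this device, and drop them
 * again on removal. */
static int example_platform_add(struct device *dev)
{
	/* returns 0 on success, -ENOMEM if a pool could not be created */
	return dmabounce_register_dev(dev, 512, 4096);
}

static void example_platform_remove(struct device *dev)
{
	dmabounce_unregister_dev(dev);
}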
arch/arm/configs/ixdp2400_defconfig  (+1 −1)

@@ -559,7 +559,7 @@ CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
 #
 CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
-CONFIG_SERIAL_8250_NR_UARTS=2
+CONFIG_SERIAL_8250_NR_UARTS=1
 # CONFIG_SERIAL_8250_EXTENDED is not set
 
 #

arch/arm/configs/ixdp2800_defconfig  (+1 −1)

@@ -559,7 +559,7 @@ CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
 #
 CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
-CONFIG_SERIAL_8250_NR_UARTS=2
+CONFIG_SERIAL_8250_NR_UARTS=1
 # CONFIG_SERIAL_8250_EXTENDED is not set
 
 #

arch/arm/kernel/traps.c  (+17 −12)

@@ -198,25 +198,16 @@ void show_stack(struct task_struct *tsk, unsigned long *sp)
 	barrier();
 }
 
-DEFINE_SPINLOCK(die_lock);
-
-/*
- * This function is protected against re-entrancy.
- */
-NORET_TYPE void die(const char *str, struct pt_regs *regs, int err)
+static void __die(const char *str, int err, struct thread_info *thread, struct pt_regs *regs)
 {
-	struct task_struct *tsk = current;
+	struct task_struct *tsk = thread->task;
 	static int die_counter;
 
-	console_verbose();
-	spin_lock_irq(&die_lock);
-	bust_spinlocks(1);
-
 	printk("Internal error: %s: %x [#%d]\n", str, err, ++die_counter);
 	print_modules();
 	__show_regs(regs);
 	printk("Process %s (pid: %d, stack limit = 0x%p)\n",
-		tsk->comm, tsk->pid, tsk->thread_info + 1);
+		tsk->comm, tsk->pid, thread + 1);
 
 	if (!user_mode(regs) || in_interrupt()) {
 		dump_mem("Stack: ", regs->ARM_sp,
@@ -224,7 +215,21 @@ NORET_TYPE void die(const char *str, struct pt_regs *regs, int err)
 		dump_backtrace(regs, tsk);
 		dump_instr(regs);
 	}
+}
+
+DEFINE_SPINLOCK(die_lock);
+
+/*
+ * This function is protected against re-entrancy.
+ */
+NORET_TYPE void die(const char *str, struct pt_regs *regs, int err)
+{
+	struct thread_info *thread = current_thread_info();
 
+	console_verbose();
+	spin_lock_irq(&die_lock);
+	bust_spinlocks(1);
+	__die(str, err, thread, regs);
 	bust_spinlocks(0);
 	spin_unlock_irq(&die_lock);
 	do_exit(SIGSEGV);
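die() now passes an explicit thread_info to the new __die() helper instead of reaching through current. On ARM, thread_info lives at the base of the kernel stack, which is why the report prints `thread + 1` as the stack limit. A hedged sketch of the idea, not the kernel's verbatim definition:

/* Sketch only, assuming the usual ARM layout: struct thread_info sits at
 * the bottom of the kernel stack, so masking the stack pointer with
 * ~(THREAD_SIZE - 1) recovers it, and the first usable stack byte (the
 * "stack limit" printed by __die()) is thread + 1, i.e. just past the
 * structure. */
static inline struct thread_info *sketch_current_thread_info(void)
{
	register unsigned long sp asm("sp");

	return (struct thread_info *)(sp & ~(THREAD_SIZE - 1));
}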
arch/arm/lib/ashldi3.S  (new file, mode 100644, +48 −0)

+/* Copyright 1995, 1996, 1998, 1999, 2000, 2003, 2004, 2005
+   Free Software Foundation, Inc.
+
+This file is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 2, or (at your option) any
+later version.
+
+In addition to the permissions in the GNU General Public License, the
+Free Software Foundation gives you unlimited permission to link the
+compiled version of this file into combinations with other programs,
+and to distribute those combinations without any restriction coming
+from the use of this file.  (The General Public License restrictions
+do apply in other respects; for example, they cover modification of
+the file, and distribution when not linked into a combine
+executable.)
+
+This file is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; see the file COPYING.  If not, write to
+the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+Boston, MA 02110-1301, USA.  */
+
+#include <linux/linkage.h>
+
+#ifdef __ARMEB__
+#define al r1
+#define ah r0
+#else
+#define al r0
+#define ah r1
+#endif
+
+ENTRY(__ashldi3)
+
+	subs	r3, r2, #32
+	rsb	ip, r2, #32
+	movmi	ah, ah, lsl r2
+	movpl	ah, al, lsl r3
+	orrmi	ah, ah, al, lsr ip
+	mov	al, al, lsl r2
+	mov	pc, lr
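__ashldi3 is the libgcc helper gcc calls for a 64-bit left shift it does not inline: the value arrives split across two registers (al = low word, ah = high word) with the shift count in r2. A C model of the same word-splitting logic, for illustration only, with the shift-by-zero and shift-by-32-or-more cases handled explicitly because they are undefined in plain C:

/* Illustrative C model of the assembly above (not the kernel's code),
 * valid for shift counts 0 <= n < 64. */
unsigned long long ashldi3_model(unsigned long long v, unsigned int n)
{
	unsigned int al = (unsigned int)v;		/* low 32 bits  */
	unsigned int ah = (unsigned int)(v >> 32);	/* high 32 bits */

	if (n == 0)
		return v;

	if (n < 32) {
		/* movmi ah, ah, lsl r2 ; orrmi ah, ah, al, lsr ip */
		ah = (ah << n) | (al >> (32 - n));
		/* mov al, al, lsl r2 */
		al <<= n;
	} else {
		/* movpl ah, al, lsl r3   (r3 = n - 32) */
		ah = al << (n - 32);
		/* a register-specified shift by >= 32 yields 0 on ARM */
		al = 0;
	}

	return ((unsigned long long)ah << 32) | al;
}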